Commit ced3de9

Revert "add RV for AR"
This reverts commit acc4fe9.
1 parent acc4fe9 · commit ced3de9

File tree: 1 file changed (+42, -34 lines)


pymc/distributions/timeseries.py

Lines changed: 42 additions & 34 deletions
@@ -22,8 +22,6 @@
 from pymc.distributions.continuous import Flat, Normal, get_tau_sigma
 from pymc.distributions.shape_utils import to_tuple
 
-from aesara.tensor.random.op import RandomVariable
-
 __all__ = [
     "AR1",
     "AR",
@@ -34,39 +32,49 @@
     "MvStudentTRandomWalk",
 ]
 
-class ARRV(RandomVariable):
-    name = "AR"
-    ndim_supp = 0
-    ndims_params = [1, 0, 0]
-    dtype = "floatX"
-    _print_name = ("AR", "\\operatorname{AR}")
-
-    #set default values for parameters
-    def __call__(self, phi, mu=0.0, sigma=1.0, init=None **kwargs) -> TensorVariable:
-        return super().__call__(phi, mu, sigma,init **kwargs)
-
-    @classmethod
-    def rng_fn(
-        cls,
-        rng: np.random.default_rng(),
-        phi: np.ndarray,
-        mu: np.ndarray,
-        sigma: np.ndarray,
-        size: Tuple[int, ...],
-        init: float) -> np.ndarray:
-
-        # size parameter *should* be necessary for time series generation
-        if size is None:
-            raise ValueError('Specify size')
-
-        # sign conventions used in signal.lfilter (or signal processing)
-        phi = np.r_[1, -phi]  # add zero-lag and negate
-
-        #ifilter convolutes x with the coefficient theta to create a linear recurrence relation and generate the AR process
-        if init is None:
-            init = rng.normal(loc=mu, scale=sigma,size=size)
-        return signal.lfilter(phi, init, axis=-1)
 
+class AR1(distribution.Continuous):
+    """
+    Autoregressive process with 1 lag.
+
+    Parameters
+    ----------
+    k: tensor
+       effect of lagged value on current value
+    tau_e: tensor
+       precision for innovations
+    """
+
+    def __init__(self, k, tau_e, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.k = k = at.as_tensor_variable(k)
+        self.tau_e = tau_e = at.as_tensor_variable(tau_e)
+        self.tau = tau_e * (1 - k ** 2)
+        self.mode = at.as_tensor_variable(0.0)
+
+    def logp(self, x):
+        """
+        Calculate log-probability of AR1 distribution at specified value.
+
+        Parameters
+        ----------
+        x: numeric
+            Value for which log-probability is calculated.
+
+        Returns
+        -------
+        TensorVariable
+        """
+        k = self.k
+        tau_e = self.tau_e  # innovation precision
+        tau = tau_e * (1 - k ** 2)  # ar1 precision
+
+        x_im1 = x[:-1]
+        x_i = x[1:]
+        boundary = Normal.dist(0.0, tau=tau).logp
+
+        innov_like = Normal.dist(k * x_im1, tau=tau_e).logp(x_i)
+        return boundary(x[0]) + at.sum(innov_like)
 
 
 class AR(distribution.Continuous):
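
Note: scipy.signal.lfilter takes (b, a, x), with the innovations passed as the filter input x and the AR coefficients negated into the denominator a, so the reverted rng_fn's call signal.lfilter(phi, init, axis=-1) appears to omit the input array. Below is a minimal sketch of that sampling idea using only numpy and scipy; the helper ar_draw and its defaults are illustrative assumptions, not part of PyMC.

import numpy as np
from scipy import signal

def ar_draw(rng, phi, mu=0.0, sigma=1.0, size=100):
    # illustrative helper, not the PyMC implementation
    innov = rng.normal(loc=mu, scale=sigma, size=size)  # e_t ~ Normal(mu, sigma)
    # lfilter solves a[0]*y[t] = b[0]*x[t] - a[1]*y[t-1] - ..., so putting the
    # negated AR coefficients into `a` gives y[t] = phi[0]*y[t-1] + ... + x[t]
    a = np.r_[1, -np.atleast_1d(phi)]  # add zero lag and negate
    b = np.ones(1)
    return signal.lfilter(b, a, innov, axis=-1)

rng = np.random.default_rng(123)
draw = ar_draw(rng, phi=[0.7], sigma=0.5, size=200)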

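For reference, the restored AR1.logp factors the joint density into a stationary marginal for x[0] with precision tau_e * (1 - k**2), plus one-step Normal(k * x[t-1], tau_e) innovation terms. A minimal numpy/scipy sketch of the same computation follows; ar1_logp and the sample values are purely illustrative.

import numpy as np
from scipy import stats

def ar1_logp(x, k, tau_e):
    tau = tau_e * (1 - k ** 2)  # stationary (marginal) precision of an AR(1)
    boundary = stats.norm.logpdf(x[0], loc=0.0, scale=1.0 / np.sqrt(tau))
    # innovation terms: x[t] | x[t-1] ~ Normal(k * x[t-1], precision tau_e)
    innov = stats.norm.logpdf(x[1:], loc=k * x[:-1], scale=1.0 / np.sqrt(tau_e))
    return boundary + innov.sum()

x = np.array([0.1, 0.3, -0.2, 0.05])
print(ar1_logp(x, k=0.5, tau_e=2.0))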