|
22 | 22 | from pymc.distributions.continuous import Flat, Normal, get_tau_sigma |
23 | 23 | from pymc.distributions.shape_utils import to_tuple |
24 | 24 |
|
| 25 | +from aesara.tensor.random.op import RandomVariable |
| 26 | + |
25 | 27 | __all__ = [ |
26 | 28 | "AR1", |
27 | 29 | "AR", |
|
32 | 34 | "MvStudentTRandomWalk", |
33 | 35 | ] |
34 | 36 |
|
class ARRV(RandomVariable):
    """RandomVariable Op that draws realizations of an AR(p) process.

    The process follows x_t = mu + phi_1 * x_{t-1} + ... + phi_p * x_{t-p} + e_t
    with Gaussian innovations e_t ~ N(0, sigma); generation is delegated to
    ``scipy.signal.lfilter``, which evaluates exactly this linear recurrence.
    """

    name = "AR"
    ndim_supp = 0
    ndims_params = [1, 0, 0]
    dtype = "floatX"
    _print_name = ("AR", "\\operatorname{AR}")

    # Set default values for parameters so callers may write ARRV()(phi).
    def __call__(self, phi, mu=0.0, sigma=1.0, init=None, **kwargs) -> TensorVariable:
        return super().__call__(phi, mu, sigma, init, **kwargs)

    @classmethod
    def rng_fn(
        cls,
        rng: np.random.Generator,
        phi: np.ndarray,
        mu: np.ndarray,
        sigma: np.ndarray,
        size: Tuple[int, ...],
        init: float = None,
    ) -> np.ndarray:
        """Draw an AR time series of shape ``size`` (last axis is time).

        Parameters
        ----------
        rng : np.random.Generator
            Source of the Gaussian innovations.
        phi : np.ndarray
            AR coefficients (lag-1 first), 1-D.
        mu, sigma : np.ndarray
            Mean and standard deviation of the innovations.
        size : tuple of int
            Output shape; required, since it fixes the series length.
        init : float, optional
            If given, overrides the first innovation as the starting value.

        Raises
        ------
        ValueError
            If ``size`` is not provided.
        """
        # The size parameter is necessary: it determines the series length.
        if size is None:
            raise ValueError('Specify size')

        # Sign conventions used in signal.lfilter (signal processing):
        # the denominator polynomial is [1, -phi_1, ..., -phi_p].
        a = np.r_[1, -np.asarray(phi)]

        # Gaussian innovations drive the recurrence; lfilter convolves them
        # with the AR polynomial to produce the process along the last axis.
        innovations = rng.normal(loc=mu, scale=sigma, size=size)
        if init is not None:
            # Use the supplied value as the starting point of the series.
            # NOTE(review): assumes scalar/broadcastable init — confirm intent.
            innovations[..., 0] = init
        return signal.lfilter([1.0], a, innovations, axis=-1)
35 | 69 |
|
class AR1(distribution.Continuous):
    """
    Autoregressive process with 1 lag.

    Parameters
    ----------
    k: tensor
        effect of lagged value on current value
    tau_e: tensor
        precision for innovations
    """

    def __init__(self, k, tau_e, *args, **kwargs):
        super().__init__(*args, **kwargs)
        k = at.as_tensor_variable(k)
        tau_e = at.as_tensor_variable(tau_e)
        self.k = k
        self.tau_e = tau_e
        # Stationary (marginal) precision implied by the AR(1) recursion.
        self.tau = tau_e * (1 - k ** 2)
        self.mode = at.as_tensor_variable(0.0)

    def logp(self, x):
        """
        Calculate log-probability of AR1 distribution at specified value.

        Parameters
        ----------
        x: numeric
            Value for which log-probability is calculated.

        Returns
        -------
        TensorVariable
        """
        k = self.k
        tau_e = self.tau_e  # innovation precision
        stationary_tau = tau_e * (1 - k ** 2)  # ar1 (marginal) precision

        lagged, current = x[:-1], x[1:]

        # First observation follows the stationary distribution of the chain.
        init_logp = Normal.dist(0.0, tau=stationary_tau).logp(x[0])
        # Remaining observations follow the one-step transition density.
        innov_logp = Normal.dist(k * lagged, tau=tau_e).logp(current)
        return init_logp + at.sum(innov_logp)
78 | 70 |
|
79 | 71 |
|
80 | 72 | class AR(distribution.Continuous): |
|
0 commit comments