diff --git a/conda-envs/environment-dev-py37.yml b/conda-envs/environment-dev-py37.yml
index 39a1d88f5c..fbbc2868ad 100644
--- a/conda-envs/environment-dev-py37.yml
+++ b/conda-envs/environment-dev-py37.yml
@@ -4,7 +4,7 @@ channels:
 - defaults
 dependencies:
 - aesara>=2.1.0
-- arviz>=0.11.2
+- arviz>=0.11.4
 - cachetools>=4.2.1
 - cloudpickle
 - fastprogress>=0.2.0
diff --git a/conda-envs/environment-dev-py38.yml b/conda-envs/environment-dev-py38.yml
index 614b8e11f1..f83321d2f4 100644
--- a/conda-envs/environment-dev-py38.yml
+++ b/conda-envs/environment-dev-py38.yml
@@ -4,7 +4,7 @@ channels:
 - defaults
 dependencies:
 - aesara>=2.1.0
-- arviz>=0.11.2
+- arviz>=0.11.4
 - cachetools>=4.2.1
 - cloudpickle
 - fastprogress>=0.2.0
diff --git a/conda-envs/environment-dev-py39.yml b/conda-envs/environment-dev-py39.yml
index fff17b0c6d..5aeff923ee 100644
--- a/conda-envs/environment-dev-py39.yml
+++ b/conda-envs/environment-dev-py39.yml
@@ -4,7 +4,7 @@ channels:
 - defaults
 dependencies:
 - aesara>=2.1.0
-- arviz>=0.11.2
+- arviz>=0.11.4
 - cachetools>=4.2.1
 - cloudpickle
 - fastprogress>=0.2.0
diff --git a/conda-envs/windows-environment-dev-py38.yml b/conda-envs/windows-environment-dev-py38.yml
index 5259a74b57..e8ac54d3b0 100644
--- a/conda-envs/windows-environment-dev-py38.yml
+++ b/conda-envs/windows-environment-dev-py38.yml
@@ -5,7 +5,7 @@ channels:
 dependencies:
 # base dependencies (see install guide for Windows)
 - aesara>=2.1.0
-- arviz>=0.11.2
+- arviz>=0.11.4
 - cachetools>=4.2.1
 - cloudpickle
 - fastprogress>=0.2.0
diff --git a/pymc/backends/arviz.py b/pymc/backends/arviz.py
index 3e2129702d..c08eb068ac 100644
--- a/pymc/backends/arviz.py
+++ b/pymc/backends/arviz.py
@@ -7,8 +7,6 @@
     Any,
     Dict,
     Iterable,
-    List,
-    Mapping,
     Optional,
     Tuple,
     Union,
@@ -21,9 +19,7 @@
 from aesara.tensor.sharedvar import SharedVariable
 from aesara.tensor.subtensor import AdvancedIncSubtensor, AdvancedIncSubtensor1
 from arviz import InferenceData, concat, rcParams
-from arviz.data.base import CoordSpec, DimSpec
-from arviz.data.base import dict_to_dataset as _dict_to_dataset
-from arviz.data.base import generate_dims_coords, make_attrs, requires
+from arviz.data.base import CoordSpec, DimSpec, dict_to_dataset, requires
 
 import pymc
 
@@ -101,42 +97,6 @@ def insert(self, k: str, v, idx: int):
         self.trace_dict[k][idx, :] = v
 
 
-def dict_to_dataset(
-    data,
-    library=None,
-    coords=None,
-    dims=None,
-    attrs=None,
-    default_dims=None,
-    skip_event_dims=None,
-    index_origin=None,
-):
-    """Temporal workaround for dict_to_dataset.
-
-    Once ArviZ>0.11.2 release is available, only two changes are needed for everything to work.
-    1) this should be deleted, 2) dict_to_dataset should be imported as is from arviz, no underscore,
-    also remove unnecessary imports
-    """
-    if default_dims is None:
-        return _dict_to_dataset(
-            data,
-            attrs=attrs,
-            library=library,
-            coords=coords,
-            dims=dims,
-            skip_event_dims=skip_event_dims,
-        )
-    else:
-        out_data = {}
-        for name, vals in data.items():
-            vals = np.atleast_1d(vals)
-            val_dims = dims.get(name)
-            val_dims, crds = generate_dims_coords(vals.shape, name, dims=val_dims, coords=coords)
-            crds = {key: xr.IndexVariable((key,), data=crds[key]) for key in val_dims}
-            out_data[name] = xr.DataArray(vals, dims=val_dims, coords=crds)
-        return xr.Dataset(data_vars=out_data, attrs=make_attrs(attrs=attrs, library=library))
-
-
 class InferenceDataConverter:  # pylint: disable=too-many-instance-attributes
     """Encapsulate InferenceData specific logic."""
 
@@ -160,7 +120,6 @@ def __init__(
         model=None,
         save_warmup: Optional[bool] = None,
         density_dist_obs: bool = True,
-        index_origin: Optional[int] = None,
     ):
         self.save_warmup = rcParams["data.save_warmup"] if save_warmup is None else save_warmup
 
@@ -196,7 +155,6 @@ def __init__(
         self.posterior_predictive = posterior_predictive
         self.log_likelihood = log_likelihood
         self.predictions = predictions
-        self.index_origin = rcParams["data.index_origin"] if index_origin is None else index_origin
 
         def arbitrary_element(dct: Dict[Any, np.ndarray]) -> np.ndarray:
             return next(iter(dct.values()))
@@ -344,7 +302,6 @@ def posterior_to_xarray(self):
                 coords=self.coords,
                 dims=self.dims,
                 attrs=self.attrs,
-                index_origin=self.index_origin,
             ),
             dict_to_dataset(
                 data_warmup,
@@ -352,7 +309,6 @@
                 coords=self.coords,
                 dims=self.dims,
                 attrs=self.attrs,
-                index_origin=self.index_origin,
             ),
         )
 
@@ -386,7 +342,6 @@ def sample_stats_to_xarray(self):
                 dims=None,
                 coords=self.coords,
                 attrs=self.attrs,
-                index_origin=self.index_origin,
             ),
             dict_to_dataset(
                 data_warmup,
@@ -394,7 +349,6 @@
                 dims=None,
                 coords=self.coords,
                 attrs=self.attrs,
-                index_origin=self.index_origin,
             ),
         )
 
@@ -427,7 +381,6 @@ def log_likelihood_to_xarray(self):
                 dims=self.dims,
                 coords=self.coords,
                 skip_event_dims=True,
-                index_origin=self.index_origin,
             ),
             dict_to_dataset(
                 data_warmup,
@@ -435,7 +388,6 @@
                 dims=self.dims,
                 coords=self.coords,
                 skip_event_dims=True,
-                index_origin=self.index_origin,
             ),
         )
 
@@ -456,9 +408,7 @@ def translate_posterior_predictive_dict_to_xarray(self, dct) -> xr.Dataset:
                     "This can mean that some draws or even whole chains are not represented.",
                     k,
                 )
-        return dict_to_dataset(
-            data, library=pymc, coords=self.coords, dims=self.dims, index_origin=self.index_origin
-        )
+        return dict_to_dataset(data, library=pymc, coords=self.coords, dims=self.dims)
 
     @requires(["posterior_predictive"])
     def posterior_predictive_to_xarray(self):
@@ -493,7 +443,6 @@ def priors_to_xarray(self):
                     library=pymc,
                     coords=self.coords,
                     dims=self.dims,
-                    index_origin=self.index_origin,
                 )
             )
         return priors_dict
@@ -510,7 +459,6 @@ def observed_data_to_xarray(self):
             coords=self.coords,
             dims=self.dims,
             default_dims=[],
-            index_origin=self.index_origin,
         )
 
     @requires(["trace", "predictions"])
@@ -557,7 +505,6 @@ def is_data(name, var) -> bool:
             coords=self.coords,
             dims=self.dims,
             default_dims=[],
-            index_origin=self.index_origin,
         )
 
     def to_inference_data(self):
diff --git a/pymc/tests/test_distributions.py b/pymc/tests/test_distributions.py
index a272a5658b..75dffe586d 100644
--- a/pymc/tests/test_distributions.py
+++ b/pymc/tests/test_distributions.py
@@ -1114,10 +1114,6 @@ def test_wald_logp(self):
             decimal=select_by_precision(float64=6, float32=1),
         )
 
-    @pytest.mark.xfail(
-        condition=(aesara.config.floatX == "float32"),
-        reason="Poor CDF in SciPy. See scipy/scipy#869 for details.",
-    )
     def test_wald_logcdf(self):
         self.check_logcdf(
             Wald,
@@ -1273,6 +1269,10 @@ def modified_scipy_hypergeom_logcdf(value, N, k, n):
             {"N": NatSmall, "k": NatSmall, "n": NatSmall},
         )
 
+    @pytest.mark.xfail(
+        condition=(aesara.config.floatX == "float32"),
+        reason="SciPy log CDF stopped working after un-pinning NumPy version.",
+    )
     def test_negative_binomial(self):
         def scipy_mu_alpha_logpmf(value, mu, alpha):
             return sp.nbinom.logpmf(value, alpha, 1 - mu / (mu + alpha))
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 84935561c3..a7d66e23cf 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -2,7 +2,7 @@
 # See that file for comments about the need/usage of each dependency.
 
 aesara>=2.1.0
-arviz>=0.11.2
+arviz>=0.11.4
 cachetools>=4.2.1
 cloudpickle
 fastprogress>=0.2.0
diff --git a/requirements.txt b/requirements.txt
index 8623a90ae7..223ca21304 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 aesara>=2.1.0
-arviz>=0.11.2
+arviz>=0.11.4
 cachetools>=4.2.1
 cloudpickle
 fastprogress>=0.2.0