From 2d617e743d5a5cad4970f00189e44ed6e9ff7c93 Mon Sep 17 00:00:00 2001 From: Keewis Date: Thu, 4 Jun 2020 21:10:35 +0200 Subject: [PATCH 01/50] generate documentation for the a few missing CFTimeIndex attributes --- doc/api-hidden.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/api-hidden.rst b/doc/api-hidden.rst index efef4259b74..1c79b4ba19c 100644 --- a/doc/api-hidden.rst +++ b/doc/api-hidden.rst @@ -550,8 +550,10 @@ CFTimeIndex.asof CFTimeIndex.asof_locs CFTimeIndex.astype + CFTimeIndex.ceil CFTimeIndex.contains CFTimeIndex.copy + CFTimeIndex.days_in_month CFTimeIndex.delete CFTimeIndex.difference CFTimeIndex.drop @@ -562,6 +564,7 @@ CFTimeIndex.equals CFTimeIndex.factorize CFTimeIndex.fillna + CFTimeIndex.floor CFTimeIndex.format CFTimeIndex.get_indexer CFTimeIndex.get_indexer_for @@ -602,6 +605,7 @@ CFTimeIndex.reindex CFTimeIndex.rename CFTimeIndex.repeat + CFTimeIndex.round CFTimeIndex.searchsorted CFTimeIndex.set_names CFTimeIndex.set_value From edae8bc0d45e3bda0a30fb59031bbb88ea3b4c46 Mon Sep 17 00:00:00 2001 From: Keewis Date: Tue, 9 Jun 2020 22:18:10 +0200 Subject: [PATCH 02/50] properly use continuation lines --- xarray/backends/api.py | 4 ++-- xarray/core/combine.py | 2 +- xarray/core/dataarray.py | 2 +- xarray/core/dataset.py | 6 +++--- xarray/core/resample.py | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/xarray/backends/api.py b/xarray/backends/api.py index 8d7c2230b2d..54befcc7542 100644 --- a/xarray/backends/api.py +++ b/xarray/backends/api.py @@ -775,7 +775,7 @@ def open_mfdataset( combine : {'by_coords', 'nested'}, optional Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to combine all the data. Default is to use ``xarray.combine_by_coords``. 
- compat : {'identical', 'equals', 'broadcast_equals', + compat : {'identical', 'equals', 'broadcast_equals', \ 'no_conflicts', 'override'}, optional String indicating how to compare variables of the same name for potential conflicts when merging: @@ -1149,7 +1149,7 @@ def save_mfdataset( mode : {'w', 'a'}, optional Write ('w') or append ('a') mode. If mode='w', any existing file at these locations will be overwritten. - format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', + format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', \ 'NETCDF3_CLASSIC'}, optional File format for the resulting netCDF file: diff --git a/xarray/core/combine.py b/xarray/core/combine.py index 58bd7178fa2..65c2ffb2c63 100644 --- a/xarray/core/combine.py +++ b/xarray/core/combine.py @@ -375,7 +375,7 @@ def combine_nested( nested-list input along which to merge. Must be the same length as the depth of the list passed to ``datasets``. - compat : {'identical', 'equals', 'broadcast_equals', + compat : {'identical', 'equals', 'broadcast_equals', \ 'no_conflicts', 'override'}, optional String indicating how to compare variables of the same name for potential merge conflicts: diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index f84f5971080..65dd1dbbd48 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -3176,7 +3176,7 @@ def differentiate( The coordinate to be used to compute the gradient. edge_order: 1 or 2. Default 1 N-th order accurate differences at the boundaries. - datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', + datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', \ 'us', 'ns', 'ps', 'fs', 'as'} Unit to compute gradient. Only valid for datetime coordinate. diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 1b0e01914f2..910fa2b72e1 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -1498,7 +1498,7 @@ def to_netcdf( Write ('w') or append ('a') mode. 
If mode='w', any existing file at this location will be overwritten. If mode='a', existing variables will be overwritten. - format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', + format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', \ 'NETCDF3_CLASSIC'}, optional File format for the resulting netCDF file: @@ -3636,7 +3636,7 @@ def merge( overwrite_vars : Hashable or iterable of Hashable, optional If provided, update variables of these name(s) without checking for conflicts in this dataset. - compat : {'broadcast_equals', 'equals', 'identical', + compat : {'broadcast_equals', 'equals', 'identical', \ 'no_conflicts'}, optional String indicating how to compare variables of the same name for potential conflicts: @@ -5441,7 +5441,7 @@ def differentiate(self, coord, edge_order=1, datetime_unit=None): The coordinate to be used to compute the gradient. edge_order: 1 or 2. Default 1 N-th order accurate differences at the boundaries. - datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', + datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', \ 'us', 'ns', 'ps', 'fs', 'as'} Unit to compute gradient. Only valid for datetime coordinate. 
diff --git a/xarray/core/resample.py b/xarray/core/resample.py index 2b3b7da6217..1b4cb49d719 100644 --- a/xarray/core/resample.py +++ b/xarray/core/resample.py @@ -130,7 +130,7 @@ def interpolate(self, kind="linear"): Parameters ---------- - kind : str {'linear', 'nearest', 'zero', 'slinear', + kind : {'linear', 'nearest', 'zero', 'slinear', \ 'quadratic', 'cubic'} Interpolation scheme to use From c9c6a95861792366b9e9b58634f05c351af5c439 Mon Sep 17 00:00:00 2001 From: Keewis Date: Sun, 26 Jul 2020 12:50:58 +0200 Subject: [PATCH 03/50] add a missing quote --- xarray/core/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xarray/core/common.py b/xarray/core/common.py index c95df77313e..29192612f0f 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -1011,7 +1011,7 @@ def resample( Whether to skip missing values when aggregating in downsampling. closed : 'left' or 'right', optional Side of each interval to treat as closed. - label : 'left or 'right', optional + label : 'left' or 'right', optional Side of each interval to use for labeling. base : int, optional For frequencies that evenly subdivide 1 day, the "origin" of the From b7794bdac84c319e2f43341946eec99d61e84ac7 Mon Sep 17 00:00:00 2001 From: Keewis Date: Sun, 26 Jul 2020 12:53:30 +0200 Subject: [PATCH 04/50] delete the note about the removed auto_combine --- doc/combining.rst | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/doc/combining.rst b/doc/combining.rst index ffc6575c579..adf46c4e0bc 100644 --- a/doc/combining.rst +++ b/doc/combining.rst @@ -244,16 +244,6 @@ in this manner. Combining along multiple dimensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. note:: - - There are currently three combining functions with similar names: - :py:func:`~xarray.auto_combine`, :py:func:`~xarray.combine_by_coords`, and - :py:func:`~xarray.combine_nested`. 
This is because - ``auto_combine`` is in the process of being deprecated in favour of the other - two functions, which are more general. If your code currently relies on - ``auto_combine``, then you will be able to get similar functionality by using - ``combine_nested``. - For combining many objects along multiple dimensions xarray provides :py:func:`~xarray.combine_nested` and :py:func:`~xarray.combine_by_coords`. These functions use a combination of ``concat`` and ``merge`` across different From 66ed0e0350aad96b68e4afbb314d13de75ed694e Mon Sep 17 00:00:00 2001 From: Keewis Date: Sun, 26 Jul 2020 12:56:15 +0200 Subject: [PATCH 05/50] replace auto_combine with combine_by_coords --- doc/dask.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/dask.rst b/doc/dask.rst index de25ee2200e..678a701b5bc 100644 --- a/doc/dask.rst +++ b/doc/dask.rst @@ -90,7 +90,7 @@ use :py:func:`~xarray.open_mfdataset`:: xr.open_mfdataset('my/files/*.nc', parallel=True) This function will automatically concatenate and merge datasets into one in -the simple cases that it understands (see :py:func:`~xarray.auto_combine` +the simple cases that it understands (see :py:func:`~xarray.combine_by_coords` for the full disclaimer). By default, :py:meth:`~xarray.open_mfdataset` will chunk each netCDF file into a single Dask array; again, supply the ``chunks`` argument to control the size of the resulting Dask arrays. In more complex cases, you can From 04b9cc88c9460dd3085fcade7d31b73312513286 Mon Sep 17 00:00:00 2001 From: Keewis Date: Sun, 26 Jul 2020 12:57:17 +0200 Subject: [PATCH 06/50] set the current module in dask.rst --- doc/dask.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/dask.rst b/doc/dask.rst index 678a701b5bc..4844967350b 100644 --- a/doc/dask.rst +++ b/doc/dask.rst @@ -1,3 +1,5 @@ +.. currentmodule:: xarray + .. 
_dask: Parallel computing with Dask From d56a9f5fe011d000d7b3f8ff2918dd80fc5f1af4 Mon Sep 17 00:00:00 2001 From: Keewis Date: Sun, 26 Jul 2020 13:42:55 +0200 Subject: [PATCH 07/50] fix some links in whats-new.rst and generate doc pages for the plot functions --- doc/api-hidden.rst | 10 ++++++++++ doc/whats-new.rst | 6 +++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/doc/api-hidden.rst b/doc/api-hidden.rst index 1c79b4ba19c..a806951ae90 100644 --- a/doc/api-hidden.rst +++ b/doc/api-hidden.rst @@ -538,6 +538,16 @@ ufuncs.tanh ufuncs.trunc + plot.plot + plot.line + plot.step + plot.hist + plot.contour + plot.contourf + plot.imshow + plot.pcolormesh + plot.scatter + plot.FacetGrid.map_dataarray plot.FacetGrid.set_titles plot.FacetGrid.set_ticks diff --git a/doc/whats-new.rst b/doc/whats-new.rst index 2ad2a426532..a83bd6623a7 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -85,7 +85,7 @@ Breaking changes `_. (:pull:`3274`) By `Elliott Sales de Andrade `_ -- The old :py:func:`auto_combine` function has now been removed in +- The old ``auto_combine`` function has now been removed in favour of the :py:func:`combine_by_coords` and :py:func:`combine_nested` functions. This also means that the default behaviour of :py:func:`open_mfdataset` has changed to use @@ -99,7 +99,7 @@ New Features ~~~~~~~~~~~~ - :py:meth:`DataArray.argmin` and :py:meth:`DataArray.argmax` now support sequences of 'dim' arguments, and if a sequence is passed return a dict - (which can be passed to :py:meth:`isel` to get the value of the minimum) of + (which can be passed to :py:meth:`DataArray.isel` to get the value of the minimum) of the indices for each dimension of the minimum or maximum of a DataArray. (:pull:`3936`) By `John Omotani `_, thanks to `Keisuke Fujii @@ -1104,7 +1104,7 @@ New functions/methods ``combine_by_coords`` to combine datasets along multiple dimensions, by specifying the argument ``combine='nested'`` or ``combine='by_coords'``. 
- The older function :py:func:`~xarray.auto_combine` has been deprecated, + The older function ``auto_combine`` has been deprecated, because its functionality has been subsumed by the new functions. To avoid FutureWarnings switch to using ``combine_nested`` or ``combine_by_coords``, (or set the ``combine`` argument in From 8bb0a4758fa91def00ac98b092af45119ef22bf3 Mon Sep 17 00:00:00 2001 From: Keewis Date: Sun, 26 Jul 2020 14:16:33 +0200 Subject: [PATCH 08/50] add the new CFTimeIndex methods to api-hidden --- doc/api-hidden.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/api-hidden.rst b/doc/api-hidden.rst index a806951ae90..489e89c3ecd 100644 --- a/doc/api-hidden.rst +++ b/doc/api-hidden.rst @@ -557,9 +557,12 @@ CFTimeIndex.any CFTimeIndex.append CFTimeIndex.argsort + CFTimeIndex.argmax + CFTimeIndex.argmin CFTimeIndex.asof CFTimeIndex.asof_locs CFTimeIndex.astype + CFTimeIndex.calendar CFTimeIndex.ceil CFTimeIndex.contains CFTimeIndex.copy From 3a8f318062f691612e7d984f01a5254dc53172b5 Mon Sep 17 00:00:00 2001 From: Keewis Date: Sun, 26 Jul 2020 14:17:57 +0200 Subject: [PATCH 09/50] don't link to CFTimeIndex.__repr__ --- doc/whats-new.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/whats-new.rst b/doc/whats-new.rst index a83bd6623a7..e4272df46fe 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -25,9 +25,9 @@ Breaking changes New Features ~~~~~~~~~~~~ -- Build :py:meth:`CFTimeIndex.__repr__` explicitly as :py:class:`pandas.Index`. Add ``calendar`` as a new +- Build ``CFTimeIndex.__repr__`` explicitly as :py:class:`pandas.Index`. Add ``calendar`` as a new property for :py:class:`CFTimeIndex` and show ``calendar`` and ``length`` in - :py:meth:`CFTimeIndex.__repr__` (:issue:`2416`, :pull:`4092`) + ``CFTimeIndex.__repr__`` (:issue:`2416`, :pull:`4092`) `Aaron Spring `_. 
From d997769e5d8e93736f84ce5ff1d1df5e5126bdc6 Mon Sep 17 00:00:00 2001 From: Keewis Date: Sun, 26 Jul 2020 19:27:22 +0200 Subject: [PATCH 10/50] don't try to link to the datetime accessor --- doc/weather-climate.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/weather-climate.rst b/doc/weather-climate.rst index f03dfd14c73..4ed11d34d7a 100644 --- a/doc/weather-climate.rst +++ b/doc/weather-climate.rst @@ -85,7 +85,7 @@ infer the sampling frequency of a :py:class:`~xarray.CFTimeIndex` or a 1-D With :py:meth:`~xarray.CFTimeIndex.strftime` we can also easily generate formatted strings from the datetime values of a :py:class:`~xarray.CFTimeIndex` directly or through the -:py:meth:`~xarray.DataArray.dt` accessor for a :py:class:`~xarray.DataArray` +``dt`` accessor for a :py:class:`~xarray.DataArray` using the same formatting as the standard `datetime.strftime`_ convention . .. _datetime.strftime: https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior From baab87bef111bd0ff676a1be79e4318846f17710 Mon Sep 17 00:00:00 2001 From: Keewis Date: Sun, 26 Jul 2020 19:29:52 +0200 Subject: [PATCH 11/50] fix a few more docstrings --- xarray/backends/api.py | 4 ++-- xarray/backends/file_manager.py | 3 ++- xarray/core/common.py | 4 ++-- xarray/core/computation.py | 14 ++++++++------ xarray/plot/dataset_plot.py | 14 +++++++------- xarray/tutorial.py | 8 ++++---- 6 files changed, 25 insertions(+), 22 deletions(-) diff --git a/xarray/backends/api.py b/xarray/backends/api.py index 54befcc7542..195ff40ad20 100644 --- a/xarray/backends/api.py +++ b/xarray/backends/api.py @@ -1144,7 +1144,7 @@ def save_mfdataset( ---------- datasets : list of xarray.Dataset List of datasets to save. - paths : list of str or list of Paths + paths : list of str or list of Path List of paths to which to save each corresponding dataset. mode : {'w', 'a'}, optional Write ('w') or append ('a') mode. 
If mode='w', any existing file at @@ -1180,7 +1180,7 @@ def save_mfdataset( default engine is chosen based on available dependencies, with a preference for 'netcdf4' if writing to a file on disk. See `Dataset.to_netcdf` for additional information. - compute: boolean + compute: bool If true compute immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed later. diff --git a/xarray/backends/file_manager.py b/xarray/backends/file_manager.py index 4967788a1e7..549426b5d07 100644 --- a/xarray/backends/file_manager.py +++ b/xarray/backends/file_manager.py @@ -175,7 +175,8 @@ def acquire(self, needs_lock=True): Returns ------- - An open file object, as returned by ``opener(*args, **kwargs)``. + file-like + An open file object, as returned by ``opener(*args, **kwargs)``. """ file, _ = self._acquire_with_cache_info(needs_lock) return file diff --git a/xarray/core/common.py b/xarray/core/common.py index 29192612f0f..2558d57a7d2 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -1321,7 +1321,7 @@ def full_like(other, fill_value, dtype: DTypeLike = None): Parameters ---------- - other : DataArray, Dataset, or Variable + other : DataArray, Dataset or Variable The reference object in input fill_value : scalar Value to fill the new object with before returning it. @@ -1445,7 +1445,7 @@ def zeros_like(other, dtype: DTypeLike = None): Parameters ---------- - other : DataArray, Dataset, or Variable + other : DataArray, Dataset or Variable The reference object. The output will have the same dimensions and coordinates as this object. dtype : dtype, optional dtype of the new array. If omitted, it defaults to other.dtype. diff --git a/xarray/core/computation.py b/xarray/core/computation.py index 94d4c6b1540..418da7a0f78 100644 --- a/xarray/core/computation.py +++ b/xarray/core/computation.py @@ -1414,22 +1414,24 @@ def where(cond, x, y): Performs xarray-like broadcasting across input arguments. 
+ All dimension coordinates on `x` and `y` must be aligned with each + other and with `cond`. + + Parameters ---------- - cond : scalar, array, Variable, DataArray or Dataset with boolean dtype + cond : scalar, array, Variable, DataArray or Dataset When True, return values from `x`, otherwise returns values from `y`. x : scalar, array, Variable, DataArray or Dataset values to choose from where `cond` is True y : scalar, array, Variable, DataArray or Dataset values to choose from where `cond` is False - All dimension coordinates on these objects must be aligned with each - other and with `cond`. - Returns ------- - In priority order: Dataset, DataArray, Variable or array, whichever - type appears as an input argument. + Dataset, DataArray, Variable or array + In priority order: Dataset, DataArray, Variable or array, whichever + type appears as an input argument. Examples -------- diff --git a/xarray/plot/dataset_plot.py b/xarray/plot/dataset_plot.py index ea037c1a2c2..19a88788c99 100644 --- a/xarray/plot/dataset_plot.py +++ b/xarray/plot/dataset_plot.py @@ -170,14 +170,14 @@ def _dsplot(plotfunc): ---------- ds : Dataset - x, y : string + x, y : str Variable names for x, y axis. hue: str, optional Variable by which to color scattered points hue_style: str, optional Can be either 'discrete' (legend) or 'continuous' (color bar). - markersize: str, optional (scatter only) - Variably by which to vary size of scattered points + markersize: str, optional + scatter only. Variable by which to vary size of scattered points. size_norm: optional Either None or 'Norm' instance to normalize the 'markersize' variable. add_guide: bool, optional @@ -185,13 +185,13 @@ def _dsplot(plotfunc): - for "discrete", build a legend. This is the default for non-numeric `hue` variables. 
- for "continuous", build a colorbar - row : string, optional + row : str, optional If passed, make row faceted plots on this dimension name - col : string, optional + col : str, optional If passed, make column faceted plots on this dimension name - col_wrap : integer, optional + col_wrap : int, optional Use together with ``col`` to wrap faceted plots - ax : matplotlib axes, optional + ax : matplotlib.axes.Axes, optional If None, uses the current axis. Not applicable when using facets. subplot_kws : dict, optional Dictionary of keyword arguments for matplotlib subplots. Only applies diff --git a/xarray/tutorial.py b/xarray/tutorial.py index d662f2fcaaf..63867cb5045 100644 --- a/xarray/tutorial.py +++ b/xarray/tutorial.py @@ -45,13 +45,13 @@ def open_dataset( Name of the file containing the dataset. If no suffix is given, assumed to be netCDF ('.nc' is appended) e.g. 'air_temperature' - cache_dir : string, optional + cache_dir : str, optional The directory in which to search for and write cached data. - cache : boolean, optional + cache : bool, optional If True, then cache data locally for use on subsequent calls - github_url : string + github_url : str Github repository where the data is stored - branch : string + branch : str The git branch to download from kws : dict, optional Passed to xarray.open_dataset From f174f760f9d0c3e6e0d46dfa271cce772c4abfc7 Mon Sep 17 00:00:00 2001 From: Keewis Date: Sun, 26 Jul 2020 19:32:31 +0200 Subject: [PATCH 12/50] remove the non-html options (which are rarely used?) 
and silence flake8 --- doc/conf.py | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index d3d126cb33f..8a1dbc4f7f0 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -53,14 +53,14 @@ matplotlib.use("Agg") try: - import rasterio + import rasterio # noqa: F401 except ImportError: allowed_failures.update( ["gallery/plot_rasterio_rgb.py", "gallery/plot_rasterio.py"] ) try: - import cartopy + import cartopy # noqa: F401 except ImportError: allowed_failures.update( [ @@ -275,21 +275,21 @@ # -- Options for LaTeX output --------------------------------------------- -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} +# latex_elements = { +# # The paper size ('letterpaper' or 'a4paper'). +# # 'papersize': 'letterpaper', +# # The font size ('10pt', '11pt' or '12pt'). +# # 'pointsize': '10pt', +# # Additional stuff for the LaTeX preamble. +# # 'preamble': '', +# } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). -latex_documents = [ - ("index", "xarray.tex", "xarray Documentation", "xarray Developers", "manual") -] +# latex_documents = [ +# ("index", "xarray.tex", "xarray Documentation", "xarray Developers", "manual") +# ] # The name of an image file (relative to this directory) to place at the top of # the title page. @@ -316,7 +316,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [("index", "xarray", "xarray Documentation", ["xarray Developers"], 1)] +# man_pages = [("index", "xarray", "xarray Documentation", ["xarray Developers"], 1)] # If true, show URL addresses after external links. 
# man_show_urls = False @@ -327,17 +327,17 @@ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) -texinfo_documents = [ - ( - "index", - "xarray", - "xarray Documentation", - "xarray Developers", - "xarray", - "N-D labeled arrays and datasets in Python.", - "Miscellaneous", - ) -] +# texinfo_documents = [ +# ( +# "index", +# "xarray", +# "xarray Documentation", +# "xarray Developers", +# "xarray", +# "N-D labeled arrays and datasets in Python.", +# "Miscellaneous", +# ) +# ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] From 1f36ca4f0ce706577e9396e14a87e47d485a432a Mon Sep 17 00:00:00 2001 From: Keewis Date: Sun, 26 Jul 2020 19:33:15 +0200 Subject: [PATCH 13/50] add type aliases --- doc/conf.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/doc/conf.py b/doc/conf.py index 8a1dbc4f7f0..9515f4131e7 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -111,6 +111,25 @@ napoleon_use_param = True napoleon_use_rtype = True +napoleon_type_aliases = { + # general terms + "sequence": ":term:`sequence`", + "hashable": ":term:`hashable`", + "iterable": ":term:`iterable`", + "callable": ":term:`callable`", + "dict_like": ":term:`dict-like `", + "dict-like": ":term:`dict-like `", + "mapping": ":term:`mapping`", + # numpy terms + "array_like": ":term:`array_like`", + "array-like": ":term:`array-like `", + # "scalar": ":term:`scalar`", + "array": ":term:`array`", + # objects without namespace + "ndarray": "~numpy.ndarray", + "ComplexWarning": "", + "Path": "~pathlib.Path", +} numpydoc_class_members_toctree = True numpydoc_show_class_members = False From b76cb75283609c667d970cbe901bc6f33f16c020 Mon Sep 17 00:00:00 2001 From: Keewis Date: Sun, 26 Jul 2020 21:14:21 +0200 Subject: [PATCH 14/50] map ComplexWarning to numpy.ComplexWarning --- doc/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/conf.py 
b/doc/conf.py index 9515f4131e7..4c7eaff82e0 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -127,7 +127,7 @@ "array": ":term:`array`", # objects without namespace "ndarray": "~numpy.ndarray", - "ComplexWarning": "", + "ComplexWarning": "~numpy.ComplexWarning", "Path": "~pathlib.Path", } From 05e204e501cd199f5ca8d591be578c01ab2cc8fd Mon Sep 17 00:00:00 2001 From: Keewis Date: Sun, 26 Jul 2020 22:27:52 +0200 Subject: [PATCH 15/50] fix more docstrings --- doc/conf.py | 4 ++++ xarray/core/common.py | 2 +- xarray/plot/dataset_plot.py | 18 ++++++++++-------- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 4c7eaff82e0..2f7347fc4e8 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -120,13 +120,17 @@ "dict_like": ":term:`dict-like `", "dict-like": ":term:`dict-like `", "mapping": ":term:`mapping`", + "file-like": ":term:`file-like `", # numpy terms "array_like": ":term:`array_like`", "array-like": ":term:`array-like `", # "scalar": ":term:`scalar`", "array": ":term:`array`", + # matplotlib terms + "color-like": ":py:func:`is_color_like`", # objects without namespace "ndarray": "~numpy.ndarray", + "dtype": "~numpy.dtype", "ComplexWarning": "~numpy.ComplexWarning", "Path": "~pathlib.Path", } diff --git a/xarray/core/common.py b/xarray/core/common.py index 2558d57a7d2..f712454d51a 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -1452,7 +1452,7 @@ def zeros_like(other, dtype: DTypeLike = None): Returns ------- - out : same as object + out New object of zeros with the same shape and type as other. Examples diff --git a/xarray/plot/dataset_plot.py b/xarray/plot/dataset_plot.py index 19a88788c99..d9fe83ab217 100644 --- a/xarray/plot/dataset_plot.py +++ b/xarray/plot/dataset_plot.py @@ -212,14 +212,16 @@ def _dsplot(plotfunc): ``center``. Setting both values prevents use of a diverging colormap. If discrete levels are provided as an explicit list, both of these values are ignored. 
- cmap : matplotlib colormap name or object, optional - The mapping from data values to color space. If not provided, this - will be either be ``viridis`` (if the function infers a sequential - dataset) or ``RdBu_r`` (if the function infers a diverging dataset). - When `Seaborn` is installed, ``cmap`` may also be a `seaborn` - color palette. If ``cmap`` is seaborn color palette and the plot type - is not ``contour`` or ``contourf``, ``levels`` must also be specified. - colors : discrete colors to plot, optional + cmap : str or matplotlib.colors.Colormap, optional + The mapping from data values to color space. Either a + matplotlib colormap name or object. If not provided, this will + be either ``viridis`` (if the function infers a sequential + dataset) or ``RdBu_r`` (if the function infers a diverging + dataset). When `Seaborn` is installed, ``cmap`` may also be a + `seaborn` color palette. If ``cmap`` is seaborn color palette + and the plot type is not ``contour`` or ``contourf``, ``levels`` + must also be specified. + colors : color-like or list of color-like, optional A single color or a list of colors. If the plot type is not ``contour`` or ``contourf``, the ``levels`` argument is required. 
center : float, optional From 66caef9db9d5ffdb9d52b322dd27a1968fd8af9e Mon Sep 17 00:00:00 2001 From: Keewis Date: Sun, 26 Jul 2020 22:36:57 +0200 Subject: [PATCH 16/50] add some missing methods and properties to api-hidden --- doc/api-hidden.rst | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/doc/api-hidden.rst b/doc/api-hidden.rst index 489e89c3ecd..b8162c0aa0e 100644 --- a/doc/api-hidden.rst +++ b/doc/api-hidden.rst @@ -52,6 +52,7 @@ core.rolling.DatasetCoarsen.var core.rolling.DatasetCoarsen.boundary core.rolling.DatasetCoarsen.coord_func + core.rolling.DatasetCoarsen.keep_attrs core.rolling.DatasetCoarsen.obj core.rolling.DatasetCoarsen.side core.rolling.DatasetCoarsen.trim_excess @@ -104,6 +105,8 @@ core.resample.DatasetResample.dims core.resample.DatasetResample.groups + core.rolling.DatasetRolling.argmax + core.rolling.DatasetRolling.argmin core.rolling.DatasetRolling.count core.rolling.DatasetRolling.max core.rolling.DatasetRolling.mean @@ -115,11 +118,15 @@ core.rolling.DatasetRolling.var core.rolling.DatasetRolling.center core.rolling.DatasetRolling.dim + core.rolling.DatasetRolling.keep_attrs core.rolling.DatasetRolling.min_periods core.rolling.DatasetRolling.obj core.rolling.DatasetRolling.rollings core.rolling.DatasetRolling.window + core.weighted.DatasetWeighted.obj + core.weighted.DatasetWeighted.weights + core.rolling_exp.RollingExp.mean Dataset.argsort @@ -188,6 +195,7 @@ core.rolling.DataArrayCoarsen.var core.rolling.DataArrayCoarsen.boundary core.rolling.DataArrayCoarsen.coord_func + core.rolling.DataArrayCoarsen.keep_attrs core.rolling.DataArrayCoarsen.obj core.rolling.DataArrayCoarsen.side core.rolling.DataArrayCoarsen.trim_excess @@ -238,6 +246,8 @@ core.resample.DataArrayResample.dims core.resample.DataArrayResample.groups + core.rolling.DataArrayRolling.argmax + core.rolling.DataArrayRolling.argmin core.rolling.DataArrayRolling.count core.rolling.DataArrayRolling.max 
core.rolling.DataArrayRolling.mean @@ -249,11 +259,15 @@ core.rolling.DataArrayRolling.var core.rolling.DataArrayRolling.center core.rolling.DataArrayRolling.dim + core.rolling.DataArrayRolling.keep_attrs core.rolling.DataArrayRolling.min_periods core.rolling.DataArrayRolling.obj core.rolling.DataArrayRolling.window core.rolling.DataArrayRolling.window_labels + core.weighted.DataArrayWeighted.obj + core.weighted.DataArrayWeighted.weights + DataArray.argsort DataArray.clip DataArray.conj @@ -277,6 +291,13 @@ core.accessor_dt.DatetimeAccessor.days_in_month core.accessor_dt.DatetimeAccessor.daysinmonth core.accessor_dt.DatetimeAccessor.hour + core.accessor_dt.DatetimeAccessor.is_leap_year + core.accessor_dt.DatetimeAccessor.is_month_end + core.accessor_dt.DatetimeAccessor.is_month_start + core.accessor_dt.DatetimeAccessor.is_quarter_end + core.accessor_dt.DatetimeAccessor.is_quarter_start + core.accessor_dt.DatetimeAccessor.is_year_end + core.accessor_dt.DatetimeAccessor.is_year_start core.accessor_dt.DatetimeAccessor.microsecond core.accessor_dt.DatetimeAccessor.minute core.accessor_dt.DatetimeAccessor.month @@ -291,6 +312,14 @@ core.accessor_dt.DatetimeAccessor.weekofyear core.accessor_dt.DatetimeAccessor.year + core.accessor_dt.TimedeltaAccessor.ceil + core.accessor_dt.TimedeltaAccessor.floor + core.accessor_dt.TimedeltaAccessor.round + core.accessor_dt.TimedeltaAccessor.days + core.accessor_dt.TimedeltaAccessor.microseconds + core.accessor_dt.TimedeltaAccessor.nanoseconds + core.accessor_dt.TimedeltaAccessor.seconds + core.accessor_str.StringAccessor.capitalize core.accessor_str.StringAccessor.center core.accessor_str.StringAccessor.contains @@ -365,6 +394,7 @@ Variable.min Variable.no_conflicts Variable.notnull + Variable.pad Variable.prod Variable.quantile Variable.rank @@ -407,6 +437,8 @@ IndexVariable.all IndexVariable.any + IndexVariable.argmax + IndexVariable.argmin IndexVariable.argsort IndexVariable.astype IndexVariable.broadcast_equals @@ -436,6 +468,7 @@ 
IndexVariable.min IndexVariable.no_conflicts IndexVariable.notnull + IndexVariable.pad IndexVariable.prod IndexVariable.quantile IndexVariable.rank @@ -712,18 +745,23 @@ backends.NetCDF4DataStore.lock backends.NetCDF4DataStore.variables + backends.H5NetCDFStore.autoclose backends.H5NetCDFStore.close backends.H5NetCDFStore.encode backends.H5NetCDFStore.encode_attribute backends.H5NetCDFStore.encode_variable + backends.H5NetCDFStore.format backends.H5NetCDFStore.get backends.H5NetCDFStore.get_attrs backends.H5NetCDFStore.get_dimensions backends.H5NetCDFStore.get_encoding backends.H5NetCDFStore.get_variables backends.H5NetCDFStore.items + backends.H5NetCDFStore.is_remote backends.H5NetCDFStore.keys backends.H5NetCDFStore.load + backends.H5NetCDFStore.lock + backends.H5NetCDFStore.open backends.H5NetCDFStore.open_store_variable backends.H5NetCDFStore.prepare_variable backends.H5NetCDFStore.set_attribute From a7daa90d282533be7e8ea2862b00acc0d5e8893a Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 29 Jul 2020 22:34:53 +0200 Subject: [PATCH 17/50] more aliases --- doc/conf.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/doc/conf.py b/doc/conf.py index 2f7347fc4e8..6e3ae2a5dbb 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -129,10 +129,18 @@ # matplotlib terms "color-like": ":py:func:`is_color_like`", # objects without namespace + "DataArray": "xarray.DataArray", + "Dataset": "xarray.Dataset", "ndarray": "~numpy.ndarray", "dtype": "~numpy.dtype", "ComplexWarning": "~numpy.ComplexWarning", - "Path": "~pathlib.Path", + "Index": "~pandas.Index", + "MultiIndex": "~pandas.MultiIndex", + "CategoricalIndex": "~pandas.CategoricalIndex", + "Path": "~~pathlib.Path", + # objects with abbreviated namespace (from pandas) + "pd.Index": "~pandas.Index", + "pd.NaT": "~pandas.NaT", } numpydoc_class_members_toctree = True From 3881799ac86904d2d31fa0dc61741a0b4959f84f Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 29 Jul 2020 22:37:49 +0200 Subject: [PATCH 
18/50] fix a few more docstrings --- xarray/core/common.py | 16 +++++++++------- xarray/core/dataarray.py | 19 +++++++++++-------- 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/xarray/core/common.py b/xarray/core/common.py index f712454d51a..227ab6e80e5 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -408,7 +408,7 @@ def assign_coords(self, coords=None, **coords_kwargs): the first element the dimension name and the second element the values for this new coordinate. - **coords_kwargs : keyword, value pairs, optional + **coords_kwargs : optional The keyword arguments form of ``coords``. One of ``coords`` or ``coords_kwargs`` must be provided. @@ -484,8 +484,10 @@ def assign_attrs(self, *args, **kwargs): Parameters ---------- - args : positional arguments passed into ``attrs.update``. - kwargs : keyword arguments passed into ``attrs.update``. + args + positional arguments passed into ``attrs.update``. + kwargs + keyword arguments passed into ``attrs.update``. Returns ------- @@ -798,11 +800,11 @@ def rolling( dim: dict, optional Mapping from the dimension name to create the rolling iterator along (e.g. `time`) to its moving window size. - min_periods : int, default None + min_periods : int, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to setting min_periods equal to the size of the window. - center : boolean, default False + center : bool, default: False Set the labels at the center of the window. keep_attrs : bool, optional If True, the object's attributes (`attrs`) will be copied from @@ -924,11 +926,11 @@ def coarsen( along (e.g., `time`). window : int Size of the moving window. - boundary : 'exact' | 'trim' | 'pad' + boundary : {"exact", "trim", "pad"}, default: "exact" If 'exact', a ValueError will be raised if dimension size is not a multiple of the window size. If 'trim', the excess entries are dropped. If 'pad', NA will be padded. 
- side : 'left' or 'right' or mapping from dimension to 'left' or 'right' + side : {'left', 'right'} or mapping of str to {"left", "right"} coord_func : function (name) that is applied to the coordinates, or a mapping from coordinate name to function (name). keep_attrs : bool, optional diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 65dd1dbbd48..4dc32fedb55 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -282,7 +282,7 @@ def __init__( object, attempts are made to use this array's metadata to fill in other unspecified arguments. A view of the array's data is used instead of a copy if possible. - coords : sequence or dict of array_like objects, optional + coords : sequence or dict of array_like, optional Coordinates (tick labels) to use for indexing along each dimension. The following notations are accepted: @@ -1231,7 +1231,8 @@ def broadcast_like( Returns ------- - new_da: xr.DataArray + new_da : DataArray + The caller broadcasted against ``other``. Examples -------- @@ -3878,9 +3879,10 @@ def argmin( >>> array.isel(array.argmin(...)) array(-1) - >>> array = xr.DataArray([[[3, 2, 1], [3, 1, 2], [2, 1, 3]], - ... [[1, 3, 2], [2, -5, 1], [2, 3, 1]]], - ... dims=("x", "y", "z")) + >>> array = xr.DataArray( + ... [[[3, 2, 1], [3, 1, 2], [2, 1, 3]], [[1, 3, 2], [2, -5, 1], [2, 3, 1]]], + ... dims=("x", "y", "z"), + ... ) >>> array.min(dim="x") array([[ 1, 2, 1], @@ -3980,9 +3982,10 @@ def argmax( array(3) - >>> array = xr.DataArray([[[3, 2, 1], [3, 1, 2], [2, 1, 3]], - ... [[1, 3, 2], [2, 5, 1], [2, 3, 1]]], - ... dims=("x", "y", "z")) + >>> array = xr.DataArray( + ... [[[3, 2, 1], [3, 1, 2], [2, 1, 3]], [[1, 3, 2], [2, 5, 1], [2, 3, 1]]], + ... dims=("x", "y", "z"), + ... ) >>> array.max(dim="x") array([[3, 3, 2], From 3fdab8f860f324e4668d7e60f9f258f046a0a2b3 Mon Sep 17 00:00:00 2001 From: Keewis Date: Fri, 31 Jul 2020 23:07:36 +0200 Subject: [PATCH 19/50] properly reference ... 
(Ellipsis) --- xarray/core/computation.py | 2 +- xarray/core/dataarray.py | 6 +++--- xarray/core/groupby.py | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/xarray/core/computation.py b/xarray/core/computation.py index 418da7a0f78..576bc08fdad 100644 --- a/xarray/core/computation.py +++ b/xarray/core/computation.py @@ -1271,7 +1271,7 @@ def dot(*arrays, dims=None, **kwargs): ---------- arrays: DataArray (or Variable) objects Arrays to compute. - dims: '...', str or tuple of strings, optional + dims: ..., str or tuple of strings, optional Which dimensions to sum over. Ellipsis ('...') sums over all dimensions. If not specified, then all the common dimensions are summed over. **kwargs: dict diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 4dc32fedb55..19885b91127 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -2496,7 +2496,7 @@ def from_dict(cls, d: dict) -> "DataArray": Parameters ---------- - d : dict, with a minimum structure of {'dims': [..], 'data': [..]} + d : dict, with a minimum structure of {'dims': [...], 'data': [...]} Returns ------- @@ -2926,8 +2926,8 @@ def dot( ---------- other : DataArray The other array with which the dot product is performed. - dims: '...', hashable or sequence of hashables, optional - Which dimensions to sum over. Ellipsis ('...') sums over all dimensions. + dims : ..., hashable or sequence of hashable, optional + Which dimensions to sum over. Ellipsis (`...`) sums over all dimensions. If not specified, then all the common dimensions are summed over. Returns diff --git a/xarray/core/groupby.py b/xarray/core/groupby.py index aa7aa1f5e86..1d0917120a1 100644 --- a/xarray/core/groupby.py +++ b/xarray/core/groupby.py @@ -558,7 +558,7 @@ def quantile( q : float in range of [0,1] (or sequence of floats) Quantile to compute, which must be between 0 and 1 inclusive. 
- dim : `...`, str or sequence of str, optional + dim : ..., str or sequence of str, optional Dimension(s) over which to apply quantile. Defaults to the grouped dimension. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} @@ -850,7 +850,7 @@ def reduce( Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. - dim : `...`, str or sequence of str, optional + dim : ..., str or sequence of str, optional Dimension(s) over which to apply `func`. axis : int or sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dimension' @@ -961,7 +961,7 @@ def reduce(self, func, dim=None, keep_attrs=None, **kwargs): Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. - dim : `...`, str or sequence of str, optional + dim : ..., str or sequence of str, optional Dimension(s) over which to apply `func`. axis : int or sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dimension' From b67c28053dce33129bca36452b3a6c4c2b53b0b5 Mon Sep 17 00:00:00 2001 From: Keewis Date: Sat, 1 Aug 2020 00:12:32 +0200 Subject: [PATCH 20/50] update the docstrings of dataset --- xarray/core/dataset.py | 211 +++++++++++++++++++++-------------------- 1 file changed, 108 insertions(+), 103 deletions(-) diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 910fa2b72e1..6f4c77d717f 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -1396,7 +1396,7 @@ def set_coords( Parameters ---------- - names : hashable or iterable of hashables + names : hashable or iterable of hashable Name(s) of variables in this dataset to convert into coordinates. 
Returns @@ -1431,7 +1431,7 @@ def reset_coords( Parameters ---------- - names : hashable or iterable of hashables, optional + names : hashable or iterable of hashable, optional Name(s) of non-index coordinates in this dataset to reset into variables. By default, all non-index coordinates are reset. drop : bool, optional @@ -1494,12 +1494,12 @@ def to_netcdf( function returns the resulting netCDF file as bytes; in this case, we need to use scipy, which does not support netCDF version 4 (the default format becomes NETCDF3_64BIT). - mode : {'w', 'a'}, optional + mode : {"w", "a"}, default: "w" Write ('w') or append ('a') mode. If mode='w', any existing file at this location will be overwritten. If mode='a', existing variables will be overwritten. - format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', \ - 'NETCDF3_CLASSIC'}, optional + format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \ + "NETCDF3_CLASSIC"}, optional File format for the resulting netCDF file: * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API @@ -1522,7 +1522,7 @@ def to_netcdf( group : str, optional Path to the netCDF4 group in the given file to open (only works for format='NETCDF4'). The group(s) will be created if necessary. - engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional + engine : {"netcdf4", "scipy", "h5netcdf"}, optional Engine to use when writing netCDF files. If not provided, the default engine is chosen based on available dependencies, with a preference for 'netcdf4' if writing to a file on disk. @@ -1543,10 +1543,10 @@ def to_netcdf( By default, no dimensions are treated as unlimited dimensions. Note that unlimited_dims may also be set via ``dataset.encoding['unlimited_dims']``. - compute: boolean + compute: bool, default: True If true compute immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed later. - invalid_netcdf: boolean + invalid_netcdf: bool, default: False Only valid along with engine='h5netcdf'. 
If True, allow writing hdf5 files which are invalid netcdf as described in https://github.com/shoyer/h5netcdf. Default: False. @@ -1589,7 +1589,7 @@ def to_zarr( ---------- store : MutableMapping, str or Path, optional Store or path to directory in file system. - mode : {'w', 'w-', 'a', None} + mode : {'w', 'w-', 'a', None}, optional Persistence mode: 'w' means create (overwrite if exists); 'w-' means create (fail if exists); 'a' means override existing variables (create if does not exist). @@ -1672,7 +1672,8 @@ def info(self, buf=None) -> None: Parameters ---------- - buf : writable buffer, defaults to sys.stdout + buf : file-like, default: sys.stdout + writable buffer See Also -------- @@ -1920,7 +1921,7 @@ def isel( drop : bool, optional If ``drop=True``, drop coordinates variables indexed by integers instead of making them scalar. - missing_dims : {"raise", "warn", "ignore"}, default "raise" + missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the Dataset: - "exception": raise an exception @@ -2062,7 +2063,7 @@ def sel( If DataArrays are passed as indexers, xarray-style indexing will be carried out. See :ref:`indexing` for the details. One of indexers or indexers_kwargs must be provided. - method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional + method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional Method to use for inexact matches: * None (default): only exact matches @@ -2215,7 +2216,7 @@ def thin( A dict with keys matching dimensions and integer values `n` or a single integer `n` applied over all dimensions. One of indexers or indexers_kwargs must be provided. - ``**indexers_kwargs`` : {dim: n, ...}, optional + **indexers_kwargs : {dim: n, ...}, optional The keyword arguments form of ``indexers``. One of indexers or indexers_kwargs must be provided. 
@@ -2295,7 +2296,7 @@ def reindex_like( other object need not be the same as the indexes on this dataset. Any mis-matched index values will be filled in with NaN, and any mis-matched dimension names will simply be ignored. - method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional + method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional Method to use for filling index values from other not found in this dataset: @@ -2349,13 +2350,13 @@ def reindex( Parameters ---------- - indexers : dict. optional + indexers : dict, optional Dictionary with keys given by dimension names and values given by arrays of coordinates tick labels. Any mis-matched coordinate values will be filled in with NaN, and any mis-matched dimension names will simply be ignored. One of indexers or indexers_kwargs must be provided. - method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional + method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional Method to use for filling index values in ``indexers`` not found in this dataset: @@ -2584,16 +2585,16 @@ def interp( New coordinate can be a scalar, array-like or DataArray. If DataArrays are passed as new coordates, their dimensions are used for the broadcasting. - method: string, optional. + method : str, optional. {'linear', 'nearest'} for multidimensional array, {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. 'linear' is used by default. - assume_sorted: boolean, optional + assume_sorted : bool, optional If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. - kwargs: dictionary, optional + kwargs: dict, optional Additional keyword arguments passed to scipy's interpolator. Valid options and their behavior depend on if 1-dimensional or multi-dimensional interpolation is used. 
@@ -2712,21 +2713,21 @@ def interp_like( Object with an 'indexes' attribute giving a mapping from dimension names to an 1d array-like, which provides coordinates upon which to index the variables in this dataset. - method: string, optional. + method : str, optional. {'linear', 'nearest'} for multidimensional array, {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. 'linear' is used by default. - assume_sorted: boolean, optional + assume_sorted : bool, optional If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. - kwargs: dictionary, optional + kwargs: dict, optional Additional keyword passed to scipy's interpolator. Returns ------- - interpolated: xr.Dataset + interpolated : Dataset Another dataset by interpolating this dataset's data along the coordinates of the other object. @@ -2813,7 +2814,7 @@ def rename( name_dict : dict-like, optional Dictionary whose keys are current variable or dimension names and whose values are the desired names. - **names, optional + **names : optional Keyword form of ``name_dict``. One of name_dict or names must be provided. @@ -2855,7 +2856,7 @@ def rename_dims( Dictionary whose keys are current dimension names and whose values are the desired names. The desired names must not be the name of an existing dimension or Variable in the Dataset. - **dims, optional + **dims : optional Keyword form of ``dims_dict``. One of dims_dict or dims must be provided. @@ -2899,7 +2900,7 @@ def rename_vars( name_dict : dict-like, optional Dictionary whose keys are current variable or coordinate names and whose values are the desired names. - **names, optional + **names : optional Keyword form of ``name_dict``. One of name_dict or names must be provided. 
@@ -3049,13 +3050,13 @@ def expand_dims( and the values are either integers (giving the length of the new dimensions) or array-like (giving the coordinates of the new dimensions). - axis : integer, sequence of integers, or None + axis : int, sequence of int, or None Axis position(s) where new axis is to be inserted (position(s) on the result array). If a list (or tuple) of integers is passed, multiple axes are inserted. In this case, dim arguments should be same length list. If axis=None is passed, all the axes will be inserted to the start of the result array. - **dim_kwargs : int or sequence/ndarray + **dim_kwargs : int or sequence or ndarray The keywords are arbitrary dimensions being inserted and the values are either the lengths of the new dims (if int is given), or their coordinates. Note, this is an alternative to passing a dict to the @@ -3178,7 +3179,7 @@ def set_index( append : bool, optional If True, append the supplied index(es) to the existing index(es). Otherwise replace the existing index(es) (default). - **indexes_kwargs: optional + **indexes_kwargs : optional The keyword arguments form of ``indexes``. One of indexes or indexes_kwargs must be provided. @@ -3275,7 +3276,7 @@ def reorder_levels( Mapping from names matching dimensions and values given by lists representing new level orders. Every given dimension must have a multi-index. - **dim_order_kwargs: optional + **dim_order_kwargs : optional The keyword arguments form of ``dim_order``. One of dim_order or dim_order_kwargs must be provided. @@ -3343,12 +3344,13 @@ def stack( Parameters ---------- - dimensions : Mapping of the form new_name=(dim1, dim2, ...) - Names of new dimensions, and the existing dimensions that they - replace. An ellipsis (`...`) will be replaced by all unlisted dimensions. + dimensions : mapping of hashable to sequence of hashable + Mapping of the form `new_name=(dim1, dim2, ...)`. Names of new + dimensions, and the existing dimensions that they replace. 
An + ellipsis (`...`) will be replaced by all unlisted dimensions. Passing a list containing an ellipsis (`stacked_dim=[...]`) will stack over all dimensions. - **dimensions_kwargs: + **dimensions_kwargs The keyword arguments form of ``dimensions``. One of dimensions or dimensions_kwargs must be provided. @@ -3382,9 +3384,9 @@ def to_stacked_array( Parameters ---------- - new_dim : Hashable + new_dim : hashable Name of the new stacked coordinate - sample_dims : Sequence[Hashable] + sample_dims : sequence of hashable Dimensions that **will not** be stacked. Each array in the dataset must share these dimensions. For machine learning applications, these define the dimensions over which samples are drawn. @@ -3538,11 +3540,13 @@ def unstack( Parameters ---------- - dim : Hashable or iterable of Hashable, optional + dim : hashable or iterable of hashable, optional Dimension(s) over which to unstack. By default unstacks all MultiIndexes. - fill_value: value to be filled. By default, np.nan - sparse: use sparse-array if True + fill_value : scalar, default: nan + value to be filled + sparse : bool, default: False + use sparse-array if True Returns ------- @@ -3588,7 +3592,7 @@ def update(self, other: "CoercibleMapping", inplace: bool = None) -> "Dataset": Parameters ---------- - other : Dataset or castable to Dataset + other : Dataset or mapping Variables with which to update this dataset. One of: - Dataset @@ -3631,13 +3635,13 @@ def merge( Parameters ---------- - other : Dataset or castable to Dataset + other : Dataset or mapping Dataset or variables to merge with this dataset. - overwrite_vars : Hashable or iterable of Hashable, optional + overwrite_vars : hashable or iterable of hashable, optional If provided, update variables of these name(s) without checking for conflicts in this dataset. 
- compat : {'broadcast_equals', 'equals', 'identical', \ - 'no_conflicts'}, optional + compat : {"broadcast_equals", "equals", "identical", \ + "no_conflicts"}, optional String indicating how to compare variables of the same name for potential conflicts: @@ -3650,7 +3654,7 @@ def merge( must be equal. The returned dataset then contains the combination of all non-null values. - join : {'outer', 'inner', 'left', 'right', 'exact'}, optional + join : {"outer", "inner", "left", "right", "exact"}, optional Method for joining ``self`` and ``other`` along shared dimensions: - 'outer': use the union of the indexes @@ -3658,7 +3662,7 @@ def merge( - 'left': use indexes from ``self`` - 'right': use indexes from ``other`` - 'exact': error instead of aligning non-equal indexes - fill_value: scalar, optional + fill_value : scalar, optional Value to use for newly missing values Returns @@ -3702,9 +3706,9 @@ def drop_vars( Parameters ---------- - names : hashable or iterable of hashables + names : hashable or iterable of hashable Name(s) of variables to drop. - errors: {'raise', 'ignore'}, optional + errors : {"raise", "ignore"}, optional If 'raise' (default), raises a ValueError error if any of the variable passed are not in the dataset. If 'ignore', any given names that are in the dataset are dropped and no error is raised. @@ -3783,9 +3787,9 @@ def drop_sel(self, labels=None, *, errors="raise", **labels_kwargs): Parameters ---------- - labels : Mapping[Hashable, Any] + labels : mapping of hashable to Any Index labels to drop - errors: {'raise', 'ignore'}, optional + errors : {"raise", "ignore"}, optional If 'raise' (default), raises a ValueError error if any of the index labels passed are not in the dataset. If 'ignore', any given labels that are in the @@ -3848,7 +3852,7 @@ def drop_dims( ---------- drop_dims : hashable or iterable of hashable Dimension or dimensions to drop. 
- errors: {'raise', 'ignore'}, optional + errors : {"raise", "ignore"}, optional If 'raise' (default), raises a ValueError error if any of the dimensions passed are not in the dataset. If 'ignore', any given labels that are in the dataset are dropped and no error is raised. @@ -3858,7 +3862,7 @@ def drop_dims( obj : Dataset The dataset without the given dimensions (or any variables containing those dimensions) - errors: {'raise', 'ignore'}, optional + errors : {"raise", "ignore"}, optional If 'raise' (default), raises a ValueError error if any of the dimensions passed are not in the dataset. If 'ignore', any given dimensions that are in the @@ -3890,7 +3894,7 @@ def transpose(self, *dims: Hashable) -> "Dataset": Parameters ---------- - *dims : Hashable, optional + *dims : hashable, optional By default, reverse the dimensions on each array. Otherwise, reorder the dimensions to this order. @@ -3935,13 +3939,13 @@ def dropna( Parameters ---------- - dim : Hashable + dim : hashable Dimension along which to drop missing values. Dropping along multiple dimensions simultaneously is not yet supported. - how : {'any', 'all'}, optional + how : {"any", "all"}, default: "any" * any : if any NA values are present, drop that label * all : if all values are NA, drop that label - thresh : int, default None + thresh : int, default: None If supplied, require this many non-NA values. subset : iterable of hashable, optional Which variables to check for missing values. By default, all @@ -4097,18 +4101,18 @@ def interpolate_na( - 'barycentric', 'krog', 'pchip', 'spline', 'akima': use their respective :py:class:`scipy.interpolate` classes. - use_coordinate : bool, str, default True + use_coordinate : bool, str, default: True Specifies which index to use as the x values in the interpolation formulated as `y = f(x)`. If False, values are treated as if eqaully-spaced along ``dim``. If True, the IndexVariable `dim` is used. 
If ``use_coordinate`` is a string, it specifies the name of a coordinate variariable to use as the index. - limit : int, default None + limit : int, default: None Maximum number of consecutive NaNs to fill. Must be greater than 0 or None for no limit. This filling is done regardless of the size of the gap in the data. To only interpolate over gaps less than a given length, see ``max_gap``. - max_gap: int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta, default None. + max_gap: int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta, default: None Maximum size of gap, a continuous sequence of NaNs, that will be filled. Use None for no limit. When interpolating along a datetime64 dimension and ``use_coordinate=True``, ``max_gap`` can be one of the following: @@ -4168,7 +4172,7 @@ def ffill(self, dim: Hashable, limit: int = None) -> "Dataset": dim : Hashable Specifies the dimension along which to propagate values when filling. - limit : int, default None + limit : int, default: None The maximum number of consecutive NaN values to forward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. Must be greater @@ -4193,7 +4197,7 @@ def bfill(self, dim: Hashable, limit: int = None) -> "Dataset": dim : str Specifies the dimension along which to propagate values when filling. - limit : int, default None + limit : int, default: None The maximum number of consecutive NaN values to backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. Must be greater @@ -4252,7 +4256,7 @@ def reduce( If True, the dataset's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. 
- keepdims : bool, default False + keepdims : bool, default: False If True, the dimensions which are reduced are left in the result as dimensions of size one. Coordinates that use these dimensions are removed. @@ -4406,12 +4410,12 @@ def assign( Parameters ---------- - variables : mapping, value pairs + variables : mapping of hashable to Any Mapping from variables names to the new values. If the new values are callable, they are computed on the Dataset and assigned to new data variables. If the values are not callable, (e.g. a DataArray, scalar, or array), they are simply assigned. - **variables_kwargs: + **variables_kwargs The keyword arguments form of ``variables``. One of variables or variables_kwargs must be provided. @@ -4621,9 +4625,9 @@ def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> "Datas Parameters ---------- - dataframe : pandas.DataFrame + dataframe : DataFrame DataFrame from which to copy data and indices. - sparse : bool + sparse : bool, default: False If true, create a sparse arrays instead of dense numpy arrays. This can potentially save a large amount of memory if the DataFrame has a MultiIndex. Requires the sparse package (sparse.pydata.org). @@ -4808,9 +4812,10 @@ def from_dict(cls, d): Parameters ---------- - d : dict, with a minimum structure of {'var_0': {'dims': [..], \ - 'data': [..]}, \ - ...} + d : dict-like + Mapping with a minimum structure of + ``{'var_0': {'dims': [..], 'data': [..]}, \ + ...}`` Returns ------- @@ -5054,13 +5059,13 @@ def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs): Parameters ---------- - shifts : Mapping with the form of {dim: offset} + shifts : mapping of hashable to int Integer offset to shift along each of the given dimensions. Positive offsets shift to the right; negative offsets shift to the left. 
- fill_value: scalar, optional + fill_value : scalar, optional Value to use for newly missing values - **shifts_kwargs: + **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. @@ -5203,7 +5208,7 @@ def sortby(self, variables, ascending=True): Parameters ---------- - variables: str, DataArray, or list of either + variables: str, DataArray, or list of str or DataArray 1D DataArray objects or name(s) of 1D variable(s) in coords/data_vars whose values are used to sort the dataset. ascending: boolean, optional @@ -5254,11 +5259,11 @@ def quantile( Parameters ---------- - q : float in range of [0,1] or array-like of floats + q : float or array-like of float Quantile to compute, which must be between 0 and 1 inclusive. dim : str or sequence of str, optional Dimension(s) over which to apply quantile. - interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points ``i < j``: @@ -5437,12 +5442,12 @@ def differentiate(self, coord, edge_order=1, datetime_unit=None): Parameters ---------- - coord: str + coord : str The coordinate to be used to compute the gradient. - edge_order: 1 or 2. Default 1 + edge_order : {1, 2}, default: 1 N-th order accurate differences at the boundaries. - datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', \ - 'us', 'ns', 'ps', 'fs', 'as'} + datetime_unit : None or {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', \ + 'us', 'ns', 'ps', 'fs', 'as'}, default: None Unit to compute gradient. Only valid for datetime coordinate. Returns @@ -5495,12 +5500,11 @@ def integrate(self, coord, datetime_unit=None): Parameters ---------- - coord: str, or a sequence of str + coord: str, or sequence of str Coordinate(s) used for the integration. 
-        datetime_unit
-            Can be specify the unit if datetime coordinate is used. One of
-            {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', 'ps', 'fs',
-            'as'}
+        datetime_unit : {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \
+            'ps', 'fs', 'as'}
+            Can be used to specify the unit if datetime coordinate is used.
 
         Returns
         -------
@@ -5616,7 +5620,7 @@ def filter_by_attrs(self, **kwargs):
 
         Parameters
         ----------
-        **kwargs : key=value
+        **kwargs
             key : str
                 Attribute name.
             value : callable or obj
@@ -5772,13 +5776,13 @@ def map_blocks(
         obj: DataArray, Dataset
             Passed to the function as its first argument, one block at a time.
-        args: Sequence
+        args: sequence
            Passed to func after unpacking and subsetting any xarray objects by blocks.
            xarray objects in args must be aligned with obj, otherwise an error is raised.
-        kwargs: Mapping
+        kwargs: mapping
            Passed verbatim to func after unpacking. xarray objects, if any, will not be
            subset to blocks. Passing dask collections in kwargs is not allowed.
-        template: (optional) DataArray, Dataset
+        template : DataArray or Dataset, optional
            xarray object representing the final result after compute is called. If not provided,
            the function will be first run on mocked-up data, that looks like ``obj`` but
            has sizes 0, to determine properties of the returned object such as dtype,
@@ -5881,13 +5885,13 @@ def polyfit(
             invalid values, False otherwise.
         rcond : float, optional
             Relative condition number to the fit.
-        w : Union[Hashable, Any], optional
+        w : hashable or Any, optional
             Weights to apply to the y-coordinate of the sample points.
             Can be an array-like object or the name of a coordinate in the dataset.
         full : bool, optional
            Whether to return the residuals, matrix rank and singular values in addition
            to the coefficients.
-        cov : Union[bool, str], optional
+        cov : bool or str, optional
            Whether to return to the covariance matrix in addition to the coefficients.
            The matrix is not scaled if `cov='unscaled'`.
@@ -6059,10 +6063,11 @@ def pad( Parameters ---------- - pad_width : Mapping with the form of {dim: (pad_before, pad_after)} - Number of values padded along each dimension. + pad_width : mapping of hashable to tuple of int + Mapping with the form of {dim: (pad_before, pad_after)} + describing the number of values padded along each dimension. {dim: pad} is a shortcut for pad_before = pad_after = pad - mode : str + mode : str, default: "constant" One of the following string values (taken from numpy docs). 'constant' (default) @@ -6095,7 +6100,7 @@ def pad( Pads with the wrap of the vector along the axis. The first values are used to pad the end and the end values are used to pad the beginning. - stat_length : int, tuple or mapping of the form {dim: tuple} + stat_length : int, tuple or mapping of hashable to tuple, default: None Used in 'maximum', 'mean', 'median', and 'minimum'. Number of values at edge of each axis used to calculate the statistic value. {dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)} unique @@ -6105,7 +6110,7 @@ def pad( (stat_length,) or int is a shortcut for before = after = statistic length for all axes. Default is ``None``, to use the entire axis. - constant_values : scalar, tuple or mapping of the form {dim: tuple} + constant_values : scalar, tuple or mapping of hashable to tuple, default: 0 Used in 'constant'. The values to set the padded values for each axis. ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique @@ -6115,7 +6120,7 @@ def pad( ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all dimensions. Default is 0. - end_values : scalar, tuple or mapping of the form {dim: tuple} + end_values : scalar, tuple or mapping of hashable to tuple, default: 0 Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. ``{dim_1: (before_1, after_1), ... 
dim_N: (before_N, after_N)}`` unique @@ -6130,7 +6135,7 @@ def pad( default with an unaltered reflection around the edge value. For the 'odd' style, the extended part of the array is created by subtracting the reflected values from two times the edge value. - **pad_width_kwargs: + **pad_width_kwargs The keyword arguments form of ``pad_width``. One of ``pad_width`` or ``pad_width_kwargs`` must be provided. @@ -6218,18 +6223,18 @@ def idxmin( dim : str, optional Dimension over which to apply `idxmin`. This is optional for 1D variables, but required for variables with 2 or more dimensions. - skipna : bool or None, default None + skipna : bool or None, default: None If True, skip missing values (as marked by NaN). By default, only skips missing values for ``float``, ``complex``, and ``object`` dtypes; other dtypes either do not have a sentinel missing value (``int``) or ``skipna=True`` has not been implemented (``datetime64`` or ``timedelta64``). - fill_value : Any, default NaN + fill_value : Any, default: NaN Value to be filled in case all of the values along a dimension are null. By default this is NaN. The fill value and result are automatically converted to a compatible dtype if possible. Ignored if ``skipna`` is False. - keep_attrs : bool, default False + keep_attrs : bool, default: False If True, the attributes (``attrs``) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. @@ -6316,18 +6321,18 @@ def idxmax( dim : str, optional Dimension over which to apply `idxmax`. This is optional for 1D variables, but required for variables with 2 or more dimensions. - skipna : bool or None, default None + skipna : bool or None, default: None If True, skip missing values (as marked by NaN). 
By default, only skips missing values for ``float``, ``complex``, and ``object`` dtypes; other dtypes either do not have a sentinel missing value (``int``) or ``skipna=True`` has not been implemented (``datetime64`` or ``timedelta64``). - fill_value : Any, default NaN + fill_value : Any, default: NaN Value to be filled in case all of the values along a dimension are null. By default this is NaN. The fill value and result are automatically converted to a compatible dtype if possible. Ignored if ``skipna`` is False. - keep_attrs : bool, default False + keep_attrs : bool, default: False If True, the attributes (``attrs``) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. From 09576d3848bc5d555b42ce367ccad181f80a5a93 Mon Sep 17 00:00:00 2001 From: Keewis Date: Sat, 1 Aug 2020 00:12:52 +0200 Subject: [PATCH 21/50] update the docstrings of DataArray --- xarray/core/dataarray.py | 174 +++++++++++++++++++++------------------ 1 file changed, 92 insertions(+), 82 deletions(-) diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 19885b91127..0ea399d1d76 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -731,7 +731,7 @@ def reset_coords( Parameters ---------- - names : hashable or iterable of hashables, optional + names : hashable or iterable of hashable, optional Name(s) of non-index coordinates in this dataset to reset into variables. By default, all non-index coordinates are reset. drop : bool, optional @@ -979,7 +979,7 @@ def chunk( Parameters ---------- - chunks : int, tuple or mapping, optional + chunks : int, tuple of int or mapping of hashable to int, optional Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or ``{'x': 5, 'y': 5}``. name_prefix : str, optional @@ -1024,7 +1024,7 @@ def isel( drop : bool, optional If ``drop=True``, drop coordinates variables indexed by integers instead of making them scalar. 
- missing_dims : {"raise", "warn", "ignore"}, default "raise" + missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the DataArray: - "exception": raise an exception @@ -1111,7 +1111,7 @@ def sel( If DataArrays are passed as indexers, xarray-style indexing will be carried out. See :ref:`indexing` for the details. One of indexers or indexers_kwargs must be provided. - method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional + method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional Method to use for inexact matches: * None (default): only exact matches @@ -1291,7 +1291,7 @@ def reindex_like( other object need not be the same as the indexes on this dataset. Any mis-matched index values will be filled in with NaN, and any mis-matched dimension names will simply be ignored. - method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional + method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional Method to use for filling index values from other not found on this data array: @@ -1405,23 +1405,27 @@ def interp( ) -> "DataArray": """ Multidimensional interpolation of variables. + Parameters + ---------- coords : dict, optional Mapping from dimension names to the new coordinates. new coordinate can be an scalar, array-like or DataArray. If DataArrays are passed as new coordates, their dimensions are used for the broadcasting. - method: {'linear', 'nearest'} for multidimensional array, - {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} - for 1-dimensional array. - assume_sorted: boolean, optional + method : str, default: "linear" + The method used to interpolate. Choose from + + - {'linear', 'nearest'} for multidimensional array, + - {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. + assume_sorted : bool, optional If False, values of x can be in any order and they are sorted first. 
If True, x has to be an array of monotonically increasing values. - kwargs: dictionary + kwargs : dict Additional keyword arguments passed to scipy's interpolator. Valid options and their behavior depend on if 1-dimensional or multi-dimensional interpolation is used. - ``**coords_kwargs`` : {dim: coordinate, ...}, optional + **coords_kwargs : {dim: coordinate, ...}, optional The keyword arguments form of ``coords``. One of coords or coords_kwargs must be provided. @@ -1478,16 +1482,17 @@ def interp_like( Object with an 'indexes' attribute giving a mapping from dimension names to an 1d array-like, which provides coordinates upon which to index the variables in this dataset. - method: string, optional. - {'linear', 'nearest'} for multidimensional array, - {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} - for 1-dimensional array. 'linear' is used by default. - assume_sorted: boolean, optional + method : str, default: "linear" + The method used to interpolate. Choose from + + - {'linear', 'nearest'} for multidimensional array, + - {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. + assume_sorted : bool, optional If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. - kwargs: dictionary, optional + kwargs : dict, optional Additional keyword passed to scipy's interpolator. Returns @@ -1530,7 +1535,7 @@ def rename( If the argument is dict-like, it used as a mapping from old names to new names for coordinates. Otherwise, use the argument as the new name for this array. - **names: hashable, optional + **names : hashable, optional The keyword arguments form of a mapping from old names to new names for coordinates. One of new_name_or_name_dict or names must be provided. @@ -1636,7 +1641,7 @@ def expand_dims( multiple axes are inserted. 
In this case, dim arguments should be same length list. If axis=None is passed, all the axes will be inserted to the start of the result array. - **dim_kwargs : int or sequence/ndarray + **dim_kwargs : int or sequence or ndarray The keywords are arbitrary dimensions being inserted and the values are either the lengths of the new dims (if int is given), or their coordinates. Note, this is an alternative to passing a dict to the @@ -1679,7 +1684,7 @@ def set_index( append : bool, optional If True, append the supplied index(es) to the existing index(es). Otherwise replace the existing index(es) (default). - **indexes_kwargs: optional + **indexes_kwargs : optional The keyword arguments form of ``indexes``. One of indexes or indexes_kwargs must be provided. @@ -1730,7 +1735,7 @@ def reset_index( Parameters ---------- - dims_or_levels : hashable or sequence of hashables + dims_or_levels : hashable or sequence of hashable Name(s) of the dimension(s) and/or multi-index level(s) that will be reset. drop : bool, optional @@ -1767,7 +1772,7 @@ def reorder_levels( Mapping from names matching dimensions and values given by lists representing new level orders. Every given dimension must have a multi-index. - **dim_order_kwargs: optional + **dim_order_kwargs : optional The keyword arguments form of ``dim_order``. One of dim_order or dim_order_kwargs must be provided. @@ -1803,12 +1808,13 @@ def stack( Parameters ---------- - dimensions : Mapping of the form new_name=(dim1, dim2, ...) + dimensions : mapping of hashable to sequence of hashable + Mapping of the form `new_name=(dim1, dim2, ...)`. Names of new dimensions, and the existing dimensions that they replace. An ellipsis (`...`) will be replaced by all unlisted dimensions. Passing a list containing an ellipsis (`stacked_dim=[...]`) will stack over all dimensions. - **dimensions_kwargs: + **dimensions_kwargs The keyword arguments form of ``dimensions``. One of dimensions or dimensions_kwargs must be provided. 
@@ -1861,8 +1867,10 @@ def unstack( dim : hashable or sequence of hashable, optional Dimension(s) over which to unstack. By default unstacks all MultiIndexes. - fill_value: value to be filled. By default, np.nan - sparse: use sparse-array if True + fill_value : scalar, default: nan + value to be filled. + sparse : bool, default: False + use sparse-array if True Returns ------- @@ -1912,7 +1920,7 @@ def to_unstacked_dataset(self, dim, level=0): level : int or str The MultiIndex level to expand to a dataset along. Can either be the integer index of the level or its name. - label : int, default 0 + label : int, default: 0 Label of the level to expand dataset along. Overrides the label argument if given. @@ -1975,7 +1983,7 @@ def transpose(self, *dims: Hashable, transpose_coords: bool = True) -> "DataArra *dims : hashable, optional By default, reverse the dimensions. Otherwise, reorder the dimensions to this order. - transpose_coords : boolean, default True + transpose_coords : boolean, default: True If True, also transpose the coordinates of this DataArray. Returns @@ -2017,9 +2025,9 @@ def drop_vars( Parameters ---------- - names : hashable or iterable of hashables + names : hashable or iterable of hashable Name(s) of variables to drop. - errors: {'raise', 'ignore'}, optional + errors: {"raise", "ignore"}, optional If 'raise' (default), raises a ValueError error if any of the variable passed are not in the dataset. If 'ignore', any given names that are in the DataArray are dropped and no error is raised. @@ -2063,9 +2071,9 @@ def drop_sel( Parameters ---------- - labels : Mapping[Hashable, Any] + labels : mapping of hashable to Any Index labels to drop - errors: {'raise', 'ignore'}, optional + errors : {"raise", "ignore"}, optional If 'raise' (default), raises a ValueError error if any of the index labels passed are not in the dataset. 
If 'ignore', any given labels that are in the @@ -2094,10 +2102,10 @@ def dropna( dim : hashable Dimension along which to drop missing values. Dropping along multiple dimensions simultaneously is not yet supported. - how : {'any', 'all'}, optional + how : {"any", "all"}, optional * any : if any NA values are present, drop that label * all : if all values are NA, drop that label - thresh : int, default None + thresh : int, default: None If supplied, require this many non-NA values. Returns @@ -2164,18 +2172,18 @@ def interpolate_na( - 'barycentric', 'krog', 'pchip', 'spline', 'akima': use their respective :py:class:`scipy.interpolate` classes. - use_coordinate : bool, str, default True + use_coordinate : bool or str, default: True Specifies which index to use as the x values in the interpolation formulated as `y = f(x)`. If False, values are treated as if eqaully-spaced along ``dim``. If True, the IndexVariable `dim` is used. If ``use_coordinate`` is a string, it specifies the name of a coordinate variariable to use as the index. - limit : int, default None + limit : int, default: None Maximum number of consecutive NaNs to fill. Must be greater than 0 or None for no limit. This filling is done regardless of the size of the gap in the data. To only interpolate over gaps less than a given length, see ``max_gap``. - max_gap: int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta, default None. + max_gap: int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta, default: None Maximum size of gap, a continuous sequence of NaNs, that will be filled. Use None for no limit. When interpolating along a datetime64 dimension and ``use_coordinate=True``, ``max_gap`` can be one of the following: @@ -2238,7 +2246,7 @@ def ffill(self, dim: Hashable, limit: int = None) -> "DataArray": dim : hashable Specifies the dimension along which to propagate values when filling. 
- limit : int, default None + limit : int, default: None The maximum number of consecutive NaN values to forward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. Must be greater @@ -2262,7 +2270,7 @@ def bfill(self, dim: Hashable, limit: int = None) -> "DataArray": dim : str Specifies the dimension along which to propagate values when filling. - limit : int, default None + limit : int, default: None The maximum number of consecutive NaN values to backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. Must be greater @@ -2307,11 +2315,11 @@ def reduce( Parameters ---------- - func : function + func : callable Function which can be called in the form `f(x, axis=axis, **kwargs)` to return the result of reducing an np.ndarray over an integer valued axis. - dim : hashable or sequence of hashables, optional + dim : hashable or sequence of hashable, optional Dimension(s) over which to apply `func`. axis : int or sequence of int, optional Axis(es) over which to repeatedly apply `func`. Only one of the @@ -2322,7 +2330,7 @@ def reduce( If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. - keepdims : bool, default False + keepdims : bool, default: False If True, the dimensions which are reduced are left in the result as dimensions of size one. Coordinates that use these dimensions are removed. @@ -2408,8 +2416,8 @@ def to_masked_array(self, copy: bool = True) -> np.ma.MaskedArray: Parameters ---------- - copy : bool - If True (default) make a copy of the array in the result. If False, + copy : bool, default: True + If True make a copy of the array in the result. If False, a MaskedArray view of DataArray.values is returned. 
Returns @@ -2496,7 +2504,8 @@ def from_dict(cls, d: dict) -> "DataArray": Parameters ---------- - d : dict, with a minimum structure of {'dims': [...], 'data': [...]} + d : dict + Mapping with a minimum structure of {'dims': [...], 'data': [...]} Returns ------- @@ -2739,7 +2748,7 @@ def _title_for_slice(self, truncate: int = 50) -> str: Parameters ---------- - truncate : integer + truncate : int, default: 50 maximum number of characters for title Returns @@ -2822,13 +2831,13 @@ def shift( Parameters ---------- - shifts : Mapping with the form of {dim: offset} + shifts : mapping of hashable to int, optional Integer offset to shift along each of the given dimensions. Positive offsets shift to the right; negative offsets shift to the left. fill_value: scalar, optional Value to use for newly missing values - **shifts_kwargs: + **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. @@ -2871,7 +2880,7 @@ def roll( Parameters ---------- - shifts : Mapping with the form of {dim: offset} + shifts : mapping of hashable to int, optional Integer offset to rotate each of the given dimensions. Positive offsets roll to the right; negative offsets roll to the left. @@ -2880,7 +2889,8 @@ def roll( The current default of roll_coords (None, equivalent to True) is deprecated and will change to False in a future version. Explicitly pass roll_coords to silence the warning. - **shifts_kwargs : The keyword arguments form of ``shifts``. + **shifts_kwargs + The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. Returns @@ -2991,10 +3001,10 @@ def sortby( Parameters ---------- - variables: hashable, DataArray, or sequence of either + variables : hashable, DataArray, or sequence of hashable or DataArray 1D DataArray objects or name(s) of 1D variable(s) in coords whose values are used to sort this array. 
- ascending: boolean, optional + ascending : bool, optional Whether to sort by ascending or descending order. Returns @@ -3040,11 +3050,11 @@ def quantile( Parameters ---------- - q : float in range of [0,1] or array-like of floats + q : float or array-like of float Quantile to compute, which must be between 0 and 1 inclusive. dim : hashable or sequence of hashable, optional Dimension(s) over which to apply quantile. - interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points ``i < j``: @@ -3175,10 +3185,10 @@ def differentiate( ---------- coord: hashable The coordinate to be used to compute the gradient. - edge_order: 1 or 2. Default 1 + edge_order: {1, 2}, default: 1 N-th order accurate differences at the boundaries. - datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', \ - 'us', 'ns', 'ps', 'fs', 'as'} + datetime_unit: None or {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', \ + 'us', 'ns', 'ps', 'fs', 'as'} Unit to compute gradient. Only valid for datetime coordinate. Returns @@ -3231,12 +3241,11 @@ def integrate( Parameters ---------- - dim: hashable, or a sequence of hashable + dim : hashable, or a sequence of hashable Coordinate(s) used for the integration. - datetime_unit: str, optional + datetime_unit : {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \ + 'ps', 'fs', 'as'}, optional Can be used to specify the unit if datetime coordinate is used. - One of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', 'ps', - 'fs', 'as'} Returns ------- @@ -3303,7 +3312,7 @@ def map_blocks( Parameters ---------- - func: callable + func : callable User-provided function that accepts a DataArray as its first parameter. 
The function will receive a subset or 'block' of this DataArray (see below), corresponding to one chunk along each chunked dimension. ``func`` will be @@ -3313,15 +3322,15 @@ def map_blocks( This function cannot add a new chunked dimension. - obj: DataArray, Dataset + obj : DataArray, Dataset Passed to the function as its first argument, one block at a time. - args: Sequence + args : sequence Passed to func after unpacking and subsetting any xarray objects by blocks. xarray objects in args must be aligned with obj, otherwise an error is raised. - kwargs: Mapping + kwargs : mapping Passed verbatim to func after unpacking. xarray objects, if any, will not be subset to blocks. Passing dask collections in kwargs is not allowed. - template: (optional) DataArray, Dataset + template : DataArray or Dataset, optional xarray object representing the final result after compute is called. If not provided, the function will be first run on mocked-up data, that looks like ``obj`` but has sizes 0, to determine properties of the returned object such as dtype, @@ -3423,13 +3432,13 @@ def polyfit( invalid values, False otherwise. rcond : float, optional Relative condition number to the fit. - w : Union[Hashable, Any], optional + w : hashable or array-like, optional Weights to apply to the y-coordinate of the sample points. Can be an array-like object or the name of a coordinate in the dataset. full : bool, optional Whether to return the residuals, matrix rank and singular values in addition to the coefficients. - cov : Union[bool, str], optional + cov : bool or str, optional Whether to return to the covariance matrix in addition to the coefficients. The matrix is not scaled if `cov='unscaled'`. @@ -3485,10 +3494,11 @@ def pad( Parameters ---------- - pad_width : Mapping with the form of {dim: (pad_before, pad_after)} - Number of values padded along each dimension. 
+ pad_width : mapping of hashable to tuple of int + Mapping with the form of {dim: (pad_before, pad_after)} + describing the number of values padded along each dimension. {dim: pad} is a shortcut for pad_before = pad_after = pad - mode : str + mode : str, default: "constant" One of the following string values (taken from numpy docs) 'constant' (default) @@ -3521,7 +3531,7 @@ def pad( Pads with the wrap of the vector along the axis. The first values are used to pad the end and the end values are used to pad the beginning. - stat_length : int, tuple or mapping of the form {dim: tuple} + stat_length : int, tuple or mapping of hashable to tuple, default: None Used in 'maximum', 'mean', 'median', and 'minimum'. Number of values at edge of each axis used to calculate the statistic value. {dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)} unique @@ -3531,7 +3541,7 @@ def pad( (stat_length,) or int is a shortcut for before = after = statistic length for all axes. Default is ``None``, to use the entire axis. - constant_values : scalar, tuple or mapping of the form {dim: tuple} + constant_values : scalar, tuple or mapping of hashable to tuple, default: 0 Used in 'constant'. The values to set the padded values for each axis. ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique @@ -3541,7 +3551,7 @@ def pad( ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all dimensions. Default is 0. - end_values : scalar, tuple or mapping of the form {dim: tuple} + end_values : scalar, tuple or mapping of hashable to tuple, default: 0 Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique @@ -3556,7 +3566,7 @@ def pad( default with an unaltered reflection around the edge value. 
For the 'odd' style, the extended part of the array is created by subtracting the reflected values from two times the edge value. - **pad_width_kwargs: + **pad_width_kwargs The keyword arguments form of ``pad_width``. One of ``pad_width`` or ``pad_width_kwargs`` must be provided. @@ -3647,18 +3657,18 @@ def idxmin( dim : str, optional Dimension over which to apply `idxmin`. This is optional for 1D arrays, but required for arrays with 2 or more dimensions. - skipna : bool or None, default None + skipna : bool or None, default: None If True, skip missing values (as marked by NaN). By default, only skips missing values for ``float``, ``complex``, and ``object`` dtypes; other dtypes either do not have a sentinel missing value (``int``) or ``skipna=True`` has not been implemented (``datetime64`` or ``timedelta64``). - fill_value : Any, default NaN + fill_value : Any, default: NaN Value to be filled in case all of the values along a dimension are null. By default this is NaN. The fill value and result are automatically converted to a compatible dtype if possible. Ignored if ``skipna`` is False. - keep_attrs : bool, default False + keep_attrs : bool, default: False If True, the attributes (``attrs``) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. @@ -3741,21 +3751,21 @@ def idxmax( Parameters ---------- - dim : str, optional + dim : hashable, optional Dimension over which to apply `idxmax`. This is optional for 1D arrays, but required for arrays with 2 or more dimensions. - skipna : bool or None, default None + skipna : bool or None, default: None If True, skip missing values (as marked by NaN). By default, only skips missing values for ``float``, ``complex``, and ``object`` dtypes; other dtypes either do not have a sentinel missing value (``int``) or ``skipna=True`` has not been implemented (``datetime64`` or ``timedelta64``). 
- fill_value : Any, default NaN + fill_value : Any, default: NaN Value to be filled in case all of the values along a dimension are null. By default this is NaN. The fill value and result are automatically converted to a compatible dtype if possible. Ignored if ``skipna`` is False. - keep_attrs : bool, default False + keep_attrs : bool, default: False If True, the attributes (``attrs``) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. From 4c797ebc3203262bae52ae690d3b06074659052e Mon Sep 17 00:00:00 2001 From: Keewis Date: Sat, 1 Aug 2020 00:16:28 +0200 Subject: [PATCH 22/50] remove the references to CFTimeOffset --- xarray/coding/cftimeindex.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/xarray/coding/cftimeindex.py b/xarray/coding/cftimeindex.py index cd57af5c7eb..b843b8f3c48 100644 --- a/xarray/coding/cftimeindex.py +++ b/xarray/coding/cftimeindex.py @@ -695,7 +695,7 @@ def floor(self, freq): Parameters ---------- - freq : str or CFTimeOffset + freq : str The frequency level to round the index to. Must be a fixed frequency like 'S' (second) not 'ME' (month end). See `frequency aliases `_ @@ -712,7 +712,7 @@ def ceil(self, freq): Parameters ---------- - freq : str or CFTimeOffset + freq : str The frequency level to round the index to. Must be a fixed frequency like 'S' (second) not 'ME' (month end). See `frequency aliases `_ @@ -729,7 +729,7 @@ def round(self, freq): Parameters ---------- - freq : str or CFTimeOffset + freq : str The frequency level to round the index to. Must be a fixed frequency like 'S' (second) not 'ME' (month end). 
See `frequency aliases `_ From bf1c433c6638c45a1137e70c567bf953746783cd Mon Sep 17 00:00:00 2001 From: Keewis Date: Sat, 1 Aug 2020 01:20:03 +0200 Subject: [PATCH 23/50] fix a lot more docstrings --- doc/conf.py | 8 ++++- xarray/core/accessor_dt.py | 12 +++---- xarray/core/accessor_str.py | 55 ++++++++++++++++--------------- xarray/core/common.py | 38 ++++++++++++---------- xarray/core/computation.py | 6 ++-- xarray/core/dataarray.py | 10 +++--- xarray/core/dataset.py | 15 +++++---- xarray/core/variable.py | 65 +++++++++++++++++++------------------ xarray/plot/plot.py | 14 ++++---- 9 files changed, 118 insertions(+), 105 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 6e3ae2a5dbb..c27df80b4fc 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -116,11 +116,15 @@ "sequence": ":term:`sequence`", "hashable": ":term:`hashable`", "iterable": ":term:`iterable`", - "callable": ":term:`callable`", + "callable": ":py:func:`callable`", "dict_like": ":term:`dict-like `", "dict-like": ":term:`dict-like `", "mapping": ":term:`mapping`", "file-like": ":term:`file-like `", + # stdlib type aliases + "MutableMapping": "~collections.abc.MutableMapping", + "sys.stdout": ":obj:`sys.stdout`", + "timedelta": "~datetime.timedelta", # numpy terms "array_like": ":term:`array_like`", "array-like": ":term:`array-like `", @@ -132,11 +136,13 @@ "DataArray": "xarray.DataArray", "Dataset": "xarray.Dataset", "ndarray": "~numpy.ndarray", + "MaskedArray": "~numpy.ma.MaskedArray", "dtype": "~numpy.dtype", "ComplexWarning": "~numpy.ComplexWarning", "Index": "~pandas.Index", "MultiIndex": "~pandas.MultiIndex", "CategoricalIndex": "~pandas.CategoricalIndex", + "Categorical": "~pandas.Categorical", "Path": "~~pathlib.Path", # objects with abbreviated namespace (from pandas) "pd.Index": "~pandas.Index", diff --git a/xarray/core/accessor_dt.py b/xarray/core/accessor_dt.py index 630aaee142f..e686dbc503f 100644 --- a/xarray/core/accessor_dt.py +++ b/xarray/core/accessor_dt.py @@ -190,8 +190,8 @@ def 
floor(self, freq): Parameters ---------- - freq : a freq string indicating the rounding resolution - e.g. 'D' for daily resolution + freq : str + a freq string indicating the rounding resolution e.g. 'D' for daily resolution Returns ------- @@ -207,8 +207,8 @@ def ceil(self, freq): Parameters ---------- - freq : a freq string indicating the rounding resolution - e.g. 'D' for daily resolution + freq : str + a freq string indicating the rounding resolution e.g. 'D' for daily resolution Returns ------- @@ -223,8 +223,8 @@ def round(self, freq): Parameters ---------- - freq : a freq string indicating the rounding resolution - e.g. 'D' for daily resolution + freq : str + a freq string indicating the rounding resolution e.g. 'D' for daily resolution Returns ------- diff --git a/xarray/core/accessor_str.py b/xarray/core/accessor_str.py index 5502ba72855..d3da075c17c 100644 --- a/xarray/core/accessor_str.py +++ b/xarray/core/accessor_str.py @@ -338,15 +338,15 @@ def count(self, pat, flags=0): This function is used to count the number of times a particular regex pattern is repeated in each of the string elements of the - :class:`~xarray.DatArray`. + :class:`~xarray.DataArray`. Parameters ---------- pat : str Valid regular expression. - flags : int, default 0, meaning no flags - Flags for the `re` module. For a complete list, `see here - `_. + flags : int, default: 0 + Flags for the `re` module. Use 0 for no flags. For a complete list, + `see here `_. Returns ------- @@ -404,9 +404,9 @@ def pad(self, width, side="left", fillchar=" "): width : int Minimum width of resulting string; additional characters will be filled with character defined in `fillchar`. - side : {'left', 'right', 'both'}, default 'left' + side : {'left', 'right', 'both'}, default: 'left' Side from which to fill resulting string. - fillchar : str, default ' ' + fillchar : str, default: ' ' Additional character for filling, default is whitespace. 
Returns @@ -517,11 +517,12 @@ def contains(self, pat, case=True, flags=0, regex=True): ---------- pat : str Character sequence or regular expression. - case : bool, default True + case : bool, default: True If True, case sensitive. - flags : int, default 0 (no flags) + flags : int, default: 0 Flags to pass through to the re module, e.g. re.IGNORECASE. - regex : bool, default True + ``0`` means no flags. + regex : bool, default: True If True, assumes the pat is a regular expression. If False, treats the pat as a literal string. @@ -558,12 +559,12 @@ def match(self, pat, case=True, flags=0): Parameters ---------- - pat : string + pat : str Character sequence or regular expression - case : boolean, default True + case : bool, default: True If True, case sensitive - flags : int, default 0 (no flags) - re module flags, e.g. re.IGNORECASE + flags : int, default: 0 + re module flags, e.g. re.IGNORECASE. ``0`` means no flags Returns ------- @@ -586,11 +587,11 @@ def strip(self, to_strip=None, side="both"): Parameters ---------- - to_strip : str or None, default None + to_strip : str or None, default: None Specifying the set of characters to be removed. All combinations of this set of characters will be stripped. If None then whitespaces are removed. - side : {'left', 'right', 'both'}, default 'left' + side : {'left', 'right', 'both'}, default: 'left' Side from which to strip. Returns @@ -620,7 +621,7 @@ def lstrip(self, to_strip=None): Parameters ---------- - to_strip : str or None, default None + to_strip : str or None, default: None Specifying the set of characters to be removed. All combinations of this set of characters will be stripped. If None then whitespaces are removed. @@ -640,7 +641,7 @@ def rstrip(self, to_strip=None): Parameters ---------- - to_strip : str or None, default None + to_strip : str or None, default: None Specifying the set of characters to be removed. All combinations of this set of characters will be stripped. 
If None then whitespaces are removed. @@ -742,7 +743,7 @@ def find(self, sub, start=0, end=None, side="left"): Left edge index end : int Right edge index - side : {'left', 'right'}, default 'left' + side : {'left', 'right'}, default: 'left' Starting side for search. Returns @@ -801,7 +802,7 @@ def index(self, sub, start=0, end=None, side="left"): Left edge index end : int Right edge index - side : {'left', 'right'}, default 'left' + side : {'left', 'right'}, default: 'left' Starting side for search. Returns @@ -852,22 +853,22 @@ def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True): Parameters ---------- - pat : string or compiled regex + pat : str or re.Pattern String can be a character sequence or regular expression. - repl : string or callable + repl : str or callable Replacement string or a callable. The callable is passed the regex match object and must return a replacement string to be used. See :func:`re.sub`. - n : int, default -1 (all) - Number of replacements to make from start - case : boolean, default None + n : int, default: -1 + Number of replacements to make from start. Use ``-1`` to replace all. + case : bool, default: None - If True, case sensitive (the default if `pat` is a string) - Set to False for case insensitive - Cannot be set if `pat` is a compiled regex - flags : int, default 0 (no flags) - - re module flags, e.g. re.IGNORECASE + flags : int, default: 0 + - re module flags, e.g. re.IGNORECASE. Use ``0`` for no flags. - Cannot be set if `pat` is a compiled regex - regex : boolean, default True + regex : bool, default: True - If True, assumes the passed-in pattern is a regular expression. 
- If False, treats the pattern as a literal string - Cannot be set to False if `pat` is a compiled regex or `repl` is diff --git a/xarray/core/common.py b/xarray/core/common.py index 227ab6e80e5..c2d488c4093 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -515,14 +515,16 @@ def pipe( Parameters ---------- - func : function + func : callable function to apply to this xarray object (Dataset/DataArray). ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the xarray object. - args : positional arguments passed into ``func``. - kwargs : a dictionary of keyword arguments passed into ``func``. + args + positional arguments passed into ``func``. + kwargs + a dictionary of keyword arguments passed into ``func``. Returns ------- @@ -637,7 +639,7 @@ def groupby(self, group, squeeze: bool = True, restore_coord_dims: bool = None): group : str, DataArray or IndexVariable Array whose unique values should be used to group this array. If a string, must be the name of a variable contained in this dataset. - squeeze : boolean, optional + squeeze : bool, optional If "group" is a dimension of any arrays in this dataset, `squeeze` controls whether the subarrays have a dimension of length 1 along that dimension or if the dimension is squeezed out. @@ -713,17 +715,17 @@ def groupby_bins( group : str, DataArray or IndexVariable Array whose binned values should be used to group this array. If a string, must be the name of a variable contained in this dataset. - bins : int or array of scalars + bins : int or array-like If bins is an int, it defines the number of equal-width bins in the range of x. However, in this case, the range of x is extended by .1% on each side to include the min or max values of x. If bins is a sequence it defines the bin edges allowing for non-uniform bin width. No extension of the range of x is done in this case. 
- right : boolean, optional + right : boolean, default: True Indicates whether the bins include the rightmost edge or not. If right == True (the default), then the bins [1,2,3,4] indicate (1,2], (2,3], (3,4]. - labels : array or boolean, default None + labels : array-like or boolean, default: None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, string bin labels are assigned by `pandas.cut`. @@ -731,7 +733,7 @@ def groupby_bins( The precision at which to store and display the bins labels. include_lowest : bool Whether the first interval should be left-inclusive or not. - squeeze : boolean, optional + squeeze : bool, default: True If "group" is a dimension of any arrays in this dataset, `squeeze` controls whether the subarrays have a dimension of length 1 along that dimension or if the dimension is squeezed out. @@ -878,8 +880,8 @@ def rolling_exp( Parameters ---------- - window : A single mapping from a dimension name to window value, - optional + window : {dim: window_size}, optional + A single mapping from a dimension name to window value. dim : str Name of the dimension to create the rolling exponential window @@ -887,8 +889,7 @@ def rolling_exp( window : int Size of the moving window. The type of this is specified in `window_type` - window_type : str, one of ['span', 'com', 'halflife', 'alpha'], - default 'span' + window_type : {'span', 'com', 'halflife', 'alpha'}, default: 'span' The format of the previously supplied window. Each is a simple numerical transformation of the others. Described in detail: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.ewm.html @@ -931,7 +932,8 @@ def coarsen( multiple of the window size. If 'trim', the excess entries are dropped. If 'pad', NA will be padded. 
side : {'left', 'right'} or mapping of str to {"left", "right"} - coord_func : function (name) that is applied to the coordinates, + coord_func : str or mapping of hashable to str, default: "mean" + function (name) that is applied to the coordinates, or a mapping from coordinate name to function (name). keep_attrs : bool, optional If True, the object's attributes (`attrs`) will be copied from @@ -1011,9 +1013,9 @@ def resample( dimension must be datetime-like. skipna : bool, optional Whether to skip missing values when aggregating in downsampling. - closed : 'left' or 'right', optional + closed : {'left', 'right'}, optional Side of each interval to treat as closed. - label : 'left' or 'right', optional + label : {'left', 'right'}, optional Side of each interval to use for labeling. base : int, optional For frequencies that evenly subdivide 1 day, the "origin" of the @@ -1147,12 +1149,12 @@ def where(self, cond, other=dtypes.NA, drop: bool = False): Parameters ---------- - cond : DataArray or Dataset with boolean dtype - Locations at which to preserve this object's values. + cond : DataArray or Dataset + Locations at which to preserve this object's values. dtype must be `bool`. other : scalar, DataArray or Dataset, optional Value to use for locations in this object where ``cond`` is False. By default, these locations filled with NA. - drop : boolean, optional + drop : bool, optional If True, coordinate labels that only correspond to False values of the condition are dropped from the result. Mutually exclusive with ``other``. diff --git a/xarray/core/computation.py b/xarray/core/computation.py index 576bc08fdad..f0fd0cb7cb8 100644 --- a/xarray/core/computation.py +++ b/xarray/core/computation.py @@ -1269,12 +1269,12 @@ def dot(*arrays, dims=None, **kwargs): Parameters ---------- - arrays: DataArray (or Variable) objects + arrays : DataArray or Variable Arrays to compute. 
- dims: ..., str or tuple of strings, optional + dims : ..., str or tuple of str, optional Which dimensions to sum over. Ellipsis ('...') sums over all dimensions. If not specified, then all the common dimensions are summed over. - **kwargs: dict + **kwargs : dict Additional keyword arguments passed to numpy.einsum or dask.array.einsum diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 0ea399d1d76..62f5600fcb5 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -1431,7 +1431,7 @@ def interp( Returns ------- - interpolated: xr.DataArray + interpolated : DataArray New dataarray on the new coordinates. Notes @@ -1497,7 +1497,7 @@ def interp_like( Returns ------- - interpolated: xr.DataArray + interpolated : DataArray Another dataarray by interpolating this dataarray's data along the coordinates of the other object. @@ -1635,7 +1635,7 @@ def expand_dims( dimensions and the values are either integers (giving the length of the new dimensions) or sequence/ndarray (giving the coordinates of the new dimensions). - axis : integer, list (or tuple) of integers, or None + axis : integer, list of int or tuple of int, or None Axis position(s) where new axis is to be inserted (position(s) on the result array). If a list (or tuple) of integers is passed, multiple axes are inserted. In this case, dim arguments should be @@ -1983,7 +1983,7 @@ def transpose(self, *dims: Hashable, transpose_coords: bool = True) -> "DataArra *dims : hashable, optional By default, reverse the dimensions. Otherwise, reorder the dimensions to this order. - transpose_coords : boolean, default: True + transpose_coords : bool, default: True If True, also transpose the coordinates of this DataArray. Returns @@ -3241,7 +3241,7 @@ def integrate( Parameters ---------- - dim : hashable, or a sequence of hashable + dim : hashable, or sequence of hashable Coordinate(s) used for the integration. 
datetime_unit : {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \ 'ps', 'fs', 'as'}, optional diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 6f4c77d717f..bcaf79a410a 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -1488,7 +1488,7 @@ def to_netcdf( Parameters ---------- - path : str, Path or file-like object, optional + path : str, Path or file-like, optional Path to which to save this dataset. File-like objects are only supported by the scipy engine. If no path is provided, this function returns the resulting netCDF file as bytes; in this case, @@ -2375,7 +2375,8 @@ def reindex( the input. In either case, a new xarray object is always returned. fill_value : scalar, optional Value to use for newly missing values - sparse: use sparse-array. By default, False + sparse : bool, default: False + use sparse-array. **indexers_kwargs : {dim: indexer, ...}, optional Keyword arguments in the same form as ``indexers``. One of indexers or indexers_kwargs must be provided. @@ -2585,7 +2586,7 @@ def interp( New coordinate can be a scalar, array-like or DataArray. If DataArrays are passed as new coordates, their dimensions are used for the broadcasting. - method : str, optional. + method : str, optional {'linear', 'nearest'} for multidimensional array, {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. 'linear' is used by default. @@ -2604,7 +2605,7 @@ def interp( Returns ------- - interpolated: xr.Dataset + interpolated : Dataset New dataset on the new coordinates. Notes @@ -2713,7 +2714,7 @@ def interp_like( Object with an 'indexes' attribute giving a mapping from dimension names to an 1d array-like, which provides coordinates upon which to index the variables in this dataset. - method : str, optional. + method : str, optional {'linear', 'nearest'} for multidimensional array, {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. 'linear' is used by default. 
@@ -5211,12 +5212,12 @@ def sortby(self, variables, ascending=True): variables: str, DataArray, or list of str or DataArray 1D DataArray objects or name(s) of 1D variable(s) in coords/data_vars whose values are used to sort the dataset. - ascending: boolean, optional + ascending: bool, optional Whether to sort by ascending or descending order. Returns ------- - sorted: Dataset + sorted : Dataset A new dataset where all the specified dims are sorted by dim labels. """ diff --git a/xarray/core/variable.py b/xarray/core/variable.py index f9a41b2cee9..2effb56777a 100644 --- a/xarray/core/variable.py +++ b/xarray/core/variable.py @@ -1053,7 +1053,7 @@ def isel( **indexers : {dim: indexer, ...} Keyword arguments with names matching dimensions and values given by integers, slice objects or arrays. - missing_dims : {"raise", "warn", "ignore"}, default "raise" + missing_dims : {"raise", "warn", "ignore"}, default: "raise" What to do if dimensions that should be selected from are not present in the DataArray: - "exception": raise an exception @@ -1146,7 +1146,7 @@ def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs): left. fill_value: scalar, optional Value to use for newly missing values - **shifts_kwargs: + **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. @@ -1194,18 +1194,19 @@ def pad( Parameters ---------- - pad_width: Mapping with the form of {dim: (pad_before, pad_after)} - Number of values padded along each dimension. + pad_width : mapping of hashable to tuple of int + Mapping with the form of {dim: (pad_before, pad_after)} + describing the number of values padded along each dimension. {dim: pad} is a shortcut for pad_before = pad_after = pad - mode: (str) + mode : str, default: "constant" See numpy / Dask docs - stat_length : int, tuple or mapping of the form {dim: tuple} + stat_length : int, tuple or mapping of hashable to tuple Used in 'maximum', 'mean', 'median', and 'minimum'. 
Number of values at edge of each axis used to calculate the statistic value. - constant_values : scalar, tuple or mapping of the form {dim: tuple} + constant_values : scalar, tuple or mapping of hashable to tuple Used in 'constant'. The values to set the padded values for each axis. - end_values : scalar, tuple or mapping of the form {dim: tuple} + end_values : scalar, tuple or mapping of hashable to tuple Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. reflect_type : {'even', 'odd'}, optional @@ -1213,7 +1214,7 @@ def pad( default with an unaltered reflection around the edge value. For the 'odd' style, the extended part of the array is created by subtracting the reflected values from two times the edge value. - **pad_width_kwargs: + **pad_width_kwargs One of pad_width or pad_width_kwargs must be provided. Returns @@ -1298,11 +1299,11 @@ def roll(self, shifts=None, **shifts_kwargs): Parameters ---------- - shifts : mapping of the form {dim: offset} + shifts : mapping of hashable to int Integer offset to roll along each of the given dimensions. Positive offsets roll to the right; negative offsets roll to the left. - **shifts_kwargs: + **shifts_kwargs The keyword arguments form of ``shifts``. One of shifts or shifts_kwargs must be provided. @@ -1440,10 +1441,11 @@ def stack(self, dimensions=None, **dimensions_kwargs): Parameters ---------- - dimensions : Mapping of form new_name=(dim1, dim2, ...) - Names of new dimensions, and the existing dimensions that they - replace. - **dimensions_kwargs: + dimensions : mapping of hashable to tuple of hashable + Mapping of form new_name=(dim1, dim2, ...) describing the + names of new dimensions, and the existing dimensions that + they replace. + **dimensions_kwargs The keyword arguments form of ``dimensions``. One of dimensions or dimensions_kwargs must be provided. 
@@ -1500,10 +1502,11 @@ def unstack(self, dimensions=None, **dimensions_kwargs): Parameters ---------- - dimensions : mapping of the form old_dim={dim1: size1, ...} - Names of existing dimensions, and the new dimensions and sizes + dimensions : mapping of hashable to mapping of hashable to int + Mapping of the form old_dim={dim1: size1, ...} describing the + names of existing dimensions, and the new dimensions and sizes that they map to. - **dimensions_kwargs: + **dimensions_kwargs The keyword arguments form of ``dimensions``. One of dimensions or dimensions_kwargs must be provided. @@ -1542,7 +1545,7 @@ def reduce( Parameters ---------- - func : function + func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of reducing an np.ndarray over an integer valued axis. @@ -1557,7 +1560,7 @@ def reduce( If True, the variable's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. - keepdims : bool, default False + keepdims : bool, default: False If True, the dimensions which are reduced are left in the result as dimensions of size one **kwargs : dict @@ -1627,7 +1630,7 @@ def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False): Parameters ---------- - variables : iterable of Array + variables : iterable of Variable Arrays to stack together. Each variable is expected to have matching dimensions and shape except for along the stacked dimension. @@ -1637,7 +1640,7 @@ def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False): existing dimension name, in which case the location of the dimension is unchanged. Where to insert the new dimension is determined by the first variable. 
- positions : None or list of integer arrays, optional + positions : None or list of array-like, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. @@ -1746,12 +1749,12 @@ def quantile( Parameters ---------- - q : float in range of [0,1] (or sequence of floats) + q : float or sequence of float Quantile to compute, which must be between 0 and 1 inclusive. dim : str or sequence of str, optional Dimension(s) over which to apply quantile. - interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points ``i < j``: @@ -1882,16 +1885,16 @@ def rolling_window( Parameters ---------- - dim: str + dim : str Dimension over which to compute rolling_window - window: int + window : int Window size of the rolling - window_dim: str + window_dim : str New name of the window dimension. - center: boolean. default False. + center : bool, default: False If True, pad fill_value for both ends. Otherwise, pad in the head of the axis. - fill_value: + fill_value value to be filled. Returns @@ -2528,7 +2531,7 @@ def concat(variables, dim="concat_dim", positions=None, shortcut=False): Parameters ---------- - variables : iterable of Array + variables : iterable of Variable Arrays to stack together. Each variable is expected to have matching dimensions and shape except for along the stacked dimension. @@ -2538,7 +2541,7 @@ def concat(variables, dim="concat_dim", positions=None, shortcut=False): existing dimension name, in which case the location of the dimension is unchanged. Where to insert the new dimension is determined by the first variable. 
- positions : None or list of integer arrays, optional + positions : None or list of array-like, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. diff --git a/xarray/plot/plot.py b/xarray/plot/plot.py index be79f0ab04c..ee079f31563 100644 --- a/xarray/plot/plot.py +++ b/xarray/plot/plot.py @@ -141,17 +141,17 @@ def plot( Parameters ---------- darray : DataArray - row : string, optional + row : str, optional If passed, make row faceted plots on this dimension name - col : string, optional + col : str, optional If passed, make column faceted plots on this dimension name - hue : string, optional + hue : str, optional If passed, make faceted line plots with hue on this dimension name - col_wrap : integer, optional + col_wrap : int, optional Use together with ``col`` to wrap faceted plots - ax : matplotlib axes, optional + ax : matplotlib.axes.Axes, optional If None, uses the current axis. Not applicable when using facets. - rtol : number, optional + rtol : float, optional Relative tolerance used to determine if the indexes are uniformly spaced. Usually a small positive number. subplot_kws : dict, optional @@ -407,7 +407,7 @@ def hist( size : scalar, optional If provided, create a new figure for the plot with the given size. Height (in inches) of each plot. See also: ``aspect``. - ax : matplotlib axes object, optional + ax : matplotlib.axes.Axes, optional Axis on which to plot this figure. By default, use the current axis. Mutually exclusive with ``size`` and ``figsize``. 
**kwargs : optional From 4a49002e9eeeeaf44709032d9ba9d53d5bd0f8ea Mon Sep 17 00:00:00 2001 From: Keewis Date: Sat, 1 Aug 2020 02:47:46 +0200 Subject: [PATCH 24/50] fix even more docstrings --- doc/conf.py | 9 ++++++-- xarray/backends/api.py | 26 +++++++++++------------ xarray/backends/zarr.py | 8 ++++---- xarray/coding/frequencies.py | 2 +- xarray/conventions.py | 14 ++++++------- xarray/core/computation.py | 30 +++++++++++++-------------- xarray/core/groupby.py | 36 +++++++++++++++++--------------- xarray/core/merge.py | 22 +++++++++++--------- xarray/core/parallel.py | 10 ++++----- xarray/core/resample.py | 12 +++++------ xarray/core/rolling.py | 40 +++++++++++++++++++++--------------- xarray/core/rolling_exp.py | 6 ++++-- 12 files changed, 117 insertions(+), 98 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index c27df80b4fc..389fb993805 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -133,8 +133,9 @@ # matplotlib terms "color-like": ":py:func:`is_color_like`", # objects without namespace - "DataArray": "xarray.DataArray", - "Dataset": "xarray.Dataset", + "DataArray": "~xarray.DataArray", + "Dataset": "~xarray.Dataset", + "Variable": "~xarray.Variable", "ndarray": "~numpy.ndarray", "MaskedArray": "~numpy.ma.MaskedArray", "dtype": "~numpy.dtype", @@ -142,6 +143,10 @@ "Index": "~pandas.Index", "MultiIndex": "~pandas.MultiIndex", "CategoricalIndex": "~pandas.CategoricalIndex", + "TimedeltaIndex": "~pandas.TimedeltaIndex", + "DatetimeIndex": "~pandas.DatetimeIndex", + "Series": "~pandas.Series", + "DataFrame": "~pandas.DataFrame", "Categorical": "~pandas.Categorical", "Path": "~~pathlib.Path", # objects with abbreviated namespace (from pandas) diff --git a/xarray/backends/api.py b/xarray/backends/api.py index 195ff40ad20..bfca4ddc80c 100644 --- a/xarray/backends/api.py +++ b/xarray/backends/api.py @@ -307,7 +307,7 @@ def open_dataset( Parameters ---------- - filename_or_obj : str, Path, file or xarray.backends.*DataStore + filename_or_obj : str, Path, file-like 
or DataStore Strings and Path objects are interpreted as a path to a netCDF file or an OpenDAP URL and opened with python-netCDF4, unless the filename ends with .gz, in which case the file is gunzipped and opened with @@ -352,7 +352,7 @@ def open_dataset( If chunks is provided, it used to load the new dataset into dask arrays. ``chunks={}`` loads the dataset with dask using a single chunk for all arrays. - lock : False or duck threading.Lock, optional + lock : False or lock-like, optional Resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently @@ -364,11 +364,11 @@ def open_dataset( argument to use dask, in which case it defaults to False. Does not change the behavior of coordinates corresponding to dimensions, which always load their data from disk into a ``pandas.Index``. - drop_variables: string or iterable, optional + drop_variables: str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. - backend_kwargs: dictionary, optional + backend_kwargs: dict, optional A dictionary of keyword arguments to pass on to the backend. This may be useful when backend options would improve performance or allow user control of dataset processing. @@ -578,7 +578,7 @@ def open_dataarray( Parameters ---------- - filename_or_obj : str, Path, file or xarray.backends.*DataStore + filename_or_obj : str, Path, file-like or DataStore Strings and Paths are interpreted as a path to a netCDF file or an OpenDAP URL and opened with python-netCDF4, unless the filename ends with .gz, in which case the file is gunzipped and opened with @@ -618,7 +618,7 @@ def open_dataarray( chunks : int or dict, optional If chunks is provided, it used to load the new dataset into dask arrays. 
- lock : False or duck threading.Lock, optional + lock : False or lock-like, optional Resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently @@ -630,11 +630,11 @@ def open_dataarray( argument to use dask, in which case it defaults to False. Does not change the behavior of coordinates corresponding to dimensions, which always load their data from disk into a ``pandas.Index``. - drop_variables: string or iterable, optional + drop_variables: str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. - backend_kwargs: dictionary, optional + backend_kwargs: dict, optional A dictionary of keyword arguments to pass on to the backend. This may be useful when backend options would improve performance or allow user control of dataset processing. @@ -799,12 +799,12 @@ def open_mfdataset( Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, with a preference for 'netcdf4'. - lock : False or duck threading.Lock, optional + lock : False or lock-like, optional Resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently active dask scheduler. - data_vars : {'minimal', 'different', 'all' or list of str}, optional + data_vars : {'minimal', 'different', 'all'} or list of str, optional These data variables will be concatenated together: * 'minimal': Only data variables in which the dimension already appears are included. @@ -816,7 +816,7 @@ def open_mfdataset( * 'all': All data variables will be concatenated. * list of str: The listed data variables will be concatenated, in addition to the 'minimal' data variables. 
- coords : {'minimal', 'different', 'all' or list of str}, optional + coords : {'minimal', 'different', 'all'} or list of str, optional These coordinate variables will be concatenated together: * 'minimal': Only coordinates in which the dimension already appears are included. @@ -1142,7 +1142,7 @@ def save_mfdataset( Parameters ---------- - datasets : list of xarray.Dataset + datasets : list of Dataset List of datasets to save. paths : list of str or list of Path List of paths to which to save each corresponding dataset. @@ -1180,7 +1180,7 @@ def save_mfdataset( default engine is chosen based on available dependencies, with a preference for 'netcdf4' if writing to a file on disk. See `Dataset.to_netcdf` for additional information. - compute: bool + compute : bool If true compute immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed later. diff --git a/xarray/backends/zarr.py b/xarray/backends/zarr.py index 540759a1c4c..3c85ae0b976 100644 --- a/xarray/backends/zarr.py +++ b/xarray/backends/zarr.py @@ -189,7 +189,7 @@ def extract_zarr_variable_encoding(variable, raise_on_invalid=False, name=None): Parameters ---------- - variable : xarray.Variable + variable : Variable raise_on_invalid : bool, optional Returns @@ -233,12 +233,12 @@ def encode_zarr_variable(var, needs_copy=True, name=None): Parameters ---------- - var : xarray.Variable + var : Variable A variable holding un-encoded data. Returns ------- - out : xarray.Variable + out : Variable A variable which has been encoded as described above. """ @@ -556,7 +556,7 @@ def open_zarr( decode_coords : bool, optional If True, decode the 'coordinates' attribute to identify coordinates in the resulting dataset. - drop_variables : string or iterable, optional + drop_variables : str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. 
diff --git a/xarray/coding/frequencies.py b/xarray/coding/frequencies.py index 86f84ba5fbd..fa11d05923f 100644 --- a/xarray/coding/frequencies.py +++ b/xarray/coding/frequencies.py @@ -61,7 +61,7 @@ def infer_freq(index): Parameters ---------- - index : CFTimeIndex, DataArray, pd.DatetimeIndex, pd.TimedeltaIndex, pd.Series + index : CFTimeIndex, DataArray, DatetimeIndex, TimedeltaIndex, Series If not passed a CFTimeIndex, this simply calls `pandas.infer_freq`. If passed a Series or a DataArray will use the values of the series (NOT THE INDEX). diff --git a/xarray/conventions.py b/xarray/conventions.py index 700dcbc0fc4..cac884eb72c 100644 --- a/xarray/conventions.py +++ b/xarray/conventions.py @@ -230,12 +230,12 @@ def encode_cf_variable(var, needs_copy=True, name=None): Parameters ---------- - var : xarray.Variable + var : Variable A variable holding un-encoded data. Returns ------- - out : xarray.Variable + out : Variable A variable which has been encoded as described above. """ ensure_not_multiindex(var, name=name) @@ -278,14 +278,14 @@ def decode_cf_variable( Parameters ---------- - name: str + name : str Name of the variable. Used for better error messages. var : Variable A variable holding potentially CF encoded information. concat_characters : bool Should character arrays be concatenated to strings, for example: ['h', 'e', 'l', 'l', 'o'] -> 'hello' - mask_and_scale: bool + mask_and_scale : bool Lazily scale (using scale_factor and add_offset) and mask (using _FillValue). If the _Unsigned attribute is present treat integer arrays as unsigned. @@ -297,7 +297,7 @@ def decode_cf_variable( Whether to stack characters into bytes along the last dimension of this array. Passed as an argument because we need to look at the full dataset to figure out if this is appropriate. - use_cftime: bool, optional + use_cftime : bool, optional Only relevant if encoded dates come from a standard calendar (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not specified). 
If None (default), attempt to decode times to @@ -534,7 +534,7 @@ def decode_cf( concat_characters : bool, optional Should character arrays be concatenated to strings, for example: ['h', 'e', 'l', 'l', 'o'] -> 'hello' - mask_and_scale: bool, optional + mask_and_scale : bool, optional Lazily scale (using scale_factor and add_offset) and mask (using _FillValue). decode_times : bool, optional @@ -543,7 +543,7 @@ def decode_cf( decode_coords : bool, optional Use the 'coordinates' attribute on variable (or the dataset itself) to identify coordinates. - drop_variables: string or iterable, optional + drop_variables: str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. diff --git a/xarray/core/computation.py b/xarray/core/computation.py index f0fd0cb7cb8..8a793dd7056 100644 --- a/xarray/core/computation.py +++ b/xarray/core/computation.py @@ -176,7 +176,7 @@ def build_output_coords( are OK, e.g., scalars, Variable, DataArray, Dataset. signature : _UfuncSignature Core dimensions signature for the operation. - exclude_dims : optional set + exclude_dims : set, optional Dimensions excluded from the operation. Coordinates along these dimensions are dropped. @@ -790,9 +790,9 @@ def apply_ufunc( the style of NumPy universal functions [1]_ (if this is not the case, set ``vectorize=True``). If this function returns multiple outputs, you must set ``output_core_dims`` as well. - *args : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars + *args : Dataset, DataArray, GroupBy, Variable, numpy.ndarray, dask.array.Array or scalar Mix of labeled and/or unlabeled arrays to which to apply the function. - input_core_dims : Sequence[Sequence], optional + input_core_dims : sequence of sequence, optional List of the same length as ``args`` giving the list of core dimensions on each input argument that should not be broadcast. 
By default, we assume there are no core dimensions on any input arguments. @@ -804,7 +804,7 @@ def apply_ufunc( Core dimensions are automatically moved to the last axes of input variables before applying ``func``, which facilitates using NumPy style generalized ufuncs [2]_. - output_core_dims : List[tuple], optional + output_core_dims : list of tuple, optional List of the same length as the number of output arguments from ``func``, giving the list of core dimensions on each output that were not broadcast on the inputs. By default, we assume that ``func`` @@ -825,7 +825,7 @@ def apply_ufunc( :py:func:`numpy.vectorize`. This option exists for convenience, but is almost always slower than supplying a pre-vectorized function. Using this option requires NumPy version 1.12 or newer. - join : {'outer', 'inner', 'left', 'right', 'exact'}, optional + join : {"outer", "inner", "left", "right", "exact"}, default: "exact" Method for joining the indexes of the passed objects along each dimension, and the variables of Dataset objects with mismatched data variables: @@ -836,7 +836,7 @@ def apply_ufunc( - 'right': use indexes from the last object with each dimension - 'exact': raise `ValueError` instead of aligning when indexes to be aligned are not equal - dataset_join : {'outer', 'inner', 'left', 'right', 'exact'}, optional + dataset_join : {"outer", "inner", "left", "right", "exact"}, default: "exact" Method for joining variables of Dataset objects with mismatched data variables. @@ -849,11 +849,11 @@ def apply_ufunc( Value used in place of missing variables on Dataset inputs when the datasets do not share the exact same ``data_vars``. Required if ``dataset_join not in {'inner', 'exact'}``, otherwise ignored. - keep_attrs: boolean, Optional + keep_attrs: bool, Optional Whether to copy attributes from the first argument to the output. kwargs: dict, optional Optional keyword arguments passed directly on to call ``func``. 
- dask: 'forbidden', 'allowed' or 'parallelized', optional + dask: {"forbidden", "allowed", "parallelized"}, default: "forbidden" How to handle applying to objects containing lazy data in the form of dask arrays: @@ -862,7 +862,7 @@ def apply_ufunc( - 'parallelized': automatically parallelize ``func`` if any of the inputs are a dask array. If used, the ``output_dtypes`` argument must also be provided. Multiple output arguments are not yet supported. - output_dtypes : list of dtypes, optional + output_dtypes : list of dtype, optional Optional list of output dtypes. Only used if dask='parallelized'. output_sizes : dict, optional Optional mapping from dimension names to sizes for outputs. Only used @@ -1075,9 +1075,9 @@ def cov(da_a, da_b, dim=None, ddof=1): Parameters ---------- - da_a: DataArray object + da_a: DataArray Array to compute. - da_b: DataArray object + da_b: DataArray Array to compute. dim : str, optional The dimension along which the covariance will be computed @@ -1155,9 +1155,9 @@ def corr(da_a, da_b, dim=None): Parameters ---------- - da_a: DataArray object + da_a: DataArray Array to compute. - da_b: DataArray object + da_b: DataArray Array to compute. dim: str, optional The dimension along which the correlation will be computed @@ -1280,7 +1280,7 @@ def dot(*arrays, dims=None, **kwargs): Returns ------- - dot: DataArray + DataArray Examples -------- @@ -1513,7 +1513,7 @@ def polyval(coord, coeffs, degree_dim="degree"): The 1D coordinate along which to evaluate the polynomial. coeffs : DataArray Coefficients of the polynomials. - degree_dim : str, default "degree" + degree_dim : str, default: "degree" Name of the polynomial degree dimension in `coeffs`. See also diff --git a/xarray/core/groupby.py b/xarray/core/groupby.py index 1d0917120a1..1c2ea5aca5a 100644 --- a/xarray/core/groupby.py +++ b/xarray/core/groupby.py @@ -43,7 +43,7 @@ def unique_value_groups(ar, sort=True): ---------- ar : array-like Input array. 
This will be flattened if it is not already 1-D. - sort : boolean, optional + sort : bool, optional Whether or not to sort unique values. Returns @@ -128,7 +128,7 @@ def _inverse_permutation_indices(positions): Parameters ---------- - positions : list of np.ndarray or slice objects. + positions : list of ndarray or slice If slice objects, all are assumed to be slices. Returns @@ -283,16 +283,16 @@ def __init__( Object to group. group : DataArray Array with the group values. - squeeze : boolean, optional + squeeze : bool, optional If "group" is a coordinate of object, `squeeze` controls whether the subarrays have a dimension of length 1 along that coordinate or if the dimension is squeezed out. - grouper : pd.Grouper, optional + grouper : pandas.Grouper, optional Used for grouping values along the `group` array. bins : array-like, optional If `bins` is specified, the groups will be discretized into the specified bins by `pandas.cut`. - restore_coord_dims : bool, default True + restore_coord_dims : bool, default: True If True, also restore the dimension order of multi-dimensional coordinates. cut_kwargs : dict, optional @@ -532,8 +532,10 @@ def fillna(self, value): Parameters ---------- - value : valid type for the grouped object's fillna method - Used to fill all matching missing values by group. + value + Used to fill all matching missing values by group. Needs + to be of a valid type for the wrapped object's fillna + method. Returns ------- @@ -555,13 +557,13 @@ def quantile( Parameters ---------- - q : float in range of [0,1] (or sequence of floats) + q : float or sequence of float Quantile to compute, which must be between 0 and 1 inclusive. dim : ..., str or sequence of str, optional Dimension(s) over which to apply quantile. Defaults to the grouped dimension. 
- interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points ``i < j``: @@ -659,8 +661,8 @@ def where(self, cond, other=dtypes.NA): Parameters ---------- - cond : DataArray or Dataset with boolean dtype - Locations at which to preserve this objects values. + cond : DataArray or Dataset + Locations at which to preserve this objects values. dtypes have to be `bool` other : scalar, DataArray or Dataset, optional Value to use for locations in this object where ``cond`` is False. By default, inserts missing values. @@ -768,7 +770,7 @@ def map(self, func, shortcut=False, args=(), **kwargs): Parameters ---------- - func : function + func : callable Callable to apply to each array. shortcut : bool, optional Whether or not to shortcut evaluation under the assumptions that: @@ -782,9 +784,9 @@ def map(self, func, shortcut=False, args=(), **kwargs): If these conditions are satisfied `shortcut` provides significant speedup. This should be the case for many common groupby operations (e.g., applying numpy ufuncs). - ``*args`` : tuple, optional + *args : tuple, optional Positional arguments passed to `func`. - ``**kwargs`` + **kwargs Used to call `func(ar, **kwargs)` for each array `ar`. Returns @@ -846,7 +848,7 @@ def reduce( Parameters ---------- - func : function + func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. @@ -906,7 +908,7 @@ def map(self, func, args=(), shortcut=None, **kwargs): Parameters ---------- - func : function + func : callable Callable to apply to each sub-dataset. args : tuple, optional Positional arguments to pass to `func`. 
@@ -957,7 +959,7 @@ def reduce(self, func, dim=None, keep_attrs=None, **kwargs): Parameters ---------- - func : function + func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. diff --git a/xarray/core/merge.py b/xarray/core/merge.py index 35b77d700a0..62329b2f25b 100644 --- a/xarray/core/merge.py +++ b/xarray/core/merge.py @@ -90,12 +90,12 @@ def unique_variable( ---------- name : hashable Name for this variable. - variables : list of xarray.Variable + variables : list of Variable List of Variable objects, all of which go by the same name in different inputs. compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional Type of equality check to use. - equals: None or bool, + equals : None or bool, optional corresponding to result of compat test Returns @@ -170,7 +170,9 @@ def merge_collected( Parameters ---------- - + grouped : mapping + prioritized : mapping + compat : str Type of equality check to use when checking for conflicts. Returns @@ -335,7 +337,7 @@ def determine_coords( Parameters ---------- - list_of_mappings : list of dict or Dataset objects + list_of_mappings : list of dict or list of Dataset Of the same form as the arguments to expand_variable_dicts. Returns @@ -410,7 +412,7 @@ def _get_priority_vars_and_indexes( Parameters ---------- - objects : list of dictionaries of variables + objects : list of dict-like of variables Dictionaries in which to find the priority variables. priority_arg : int or None Integer object whose variable should take priority. @@ -550,7 +552,7 @@ def merge_core( Parameters ---------- - objects : list of mappings + objects : list of mapping All values must be convertable to labeled arrays. compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional Compatibility checks to use when merging variables. 
@@ -558,7 +560,7 @@ def merge_core( How to combine objects with different indexes. combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'}, optional How to combine attributes of objects - priority_arg : integer, optional + priority_arg : int, optional Optional argument in `objects` that takes precedence over the others. explicit_coords : set, optional An explicit list of variables from `objects` that are coordinates. @@ -636,7 +638,7 @@ def merge( Parameters ---------- - objects : Iterable[Union[xarray.Dataset, xarray.DataArray, dict]] + objects : iterable of Dataset or iterable of DataArray or iterable of dict-like Merge together all variables from these objects. If any of them are DataArray objects, they must have a name. compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional @@ -666,8 +668,8 @@ def merge( dimension must have the same size in all objects. fill_value : scalar, optional Value to use for newly missing values - combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'}, - default 'drop' + combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'}, \ + default: 'drop' String indicating how to combine attrs of the objects being merged: - 'drop': empty attrs on returned Dataset. diff --git a/xarray/core/parallel.py b/xarray/core/parallel.py index 07d61e595c9..5f04a3ef226 100644 --- a/xarray/core/parallel.py +++ b/xarray/core/parallel.py @@ -176,7 +176,7 @@ def map_blocks( Parameters ---------- - func: callable + func : callable User-provided function that accepts a DataArray or Dataset as its first parameter ``obj``. The function will receive a subset or 'block' of ``obj`` (see below), corresponding to one chunk along each chunked dimension. ``func`` will be @@ -186,15 +186,15 @@ def map_blocks( This function cannot add a new chunked dimension. - obj: DataArray, Dataset + obj : DataArray, Dataset Passed to the function as its first argument, one block at a time. 
- args: Sequence + args : sequence Passed to func after unpacking and subsetting any xarray objects by blocks. xarray objects in args must be aligned with obj, otherwise an error is raised. - kwargs: Mapping + kwargs : mapping Passed verbatim to func after unpacking. xarray objects, if any, will not be subset to blocks. Passing dask collections in kwargs is not allowed. - template: (optional) DataArray, Dataset + template : DataArray or Dataset, optional xarray object representing the final result after compute is called. If not provided, the function will be first run on mocked-up data, that looks like ``obj`` but has sizes 0, to determine properties of the returned object such as dtype, diff --git a/xarray/core/resample.py b/xarray/core/resample.py index 1b4cb49d719..08e384da1d3 100644 --- a/xarray/core/resample.py +++ b/xarray/core/resample.py @@ -29,7 +29,7 @@ def _upsample(self, method, *args, **kwargs): Parameters ---------- - method : str {'asfreq', 'pad', 'ffill', 'backfill', 'bfill', 'nearest', + method : {'asfreq', 'pad', 'ffill', 'backfill', 'bfill', 'nearest', \ 'interpolate'} Method to use for up-sampling @@ -130,8 +130,8 @@ def interpolate(self, kind="linear"): Parameters ---------- - kind : {'linear', 'nearest', 'zero', 'slinear', \ - 'quadratic', 'cubic'} + kind : {"linear", "nearest", "zero", "slinear", \ + "quadratic", "cubic"}, default: "linear" Interpolation scheme to use See Also @@ -193,7 +193,7 @@ def map(self, func, shortcut=False, args=(), **kwargs): Parameters ---------- - func : function + func : callable Callable to apply to each array. shortcut : bool, optional Whether or not to shortcut evaluation under the assumptions that: @@ -287,7 +287,7 @@ def map(self, func, args=(), shortcut=None, **kwargs): Parameters ---------- - func : function + func : callable Callable to apply to each sub-dataset. args : tuple, optional Positional arguments passed on to `func`. 
@@ -327,7 +327,7 @@ def reduce(self, func, dim=None, keep_attrs=None, **kwargs): Parameters ---------- - func : function + func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. diff --git a/xarray/core/rolling.py b/xarray/core/rolling.py index ecba5307680..422c681576b 100644 --- a/xarray/core/rolling.py +++ b/xarray/core/rolling.py @@ -54,17 +54,19 @@ def __init__(self, obj, windows, min_periods=None, center=False, keep_attrs=None ---------- obj : Dataset or DataArray Object to window. - windows : A mapping from a dimension name to window size + windows : mapping of hashable to int + A mapping from a dimension name to window size + dim : str Name of the dimension to create the rolling iterator along (e.g., `time`). window : int Size of the moving window. - min_periods : int, default None + min_periods : int, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to setting min_periods equal to the size of the window. - center : boolean, default False + center : bool, default: False Set the labels at the center of the window. keep_attrs : bool, optional If True, the object's attributes (`attrs`) will be copied from @@ -162,7 +164,9 @@ def __init__(self, obj, windows, min_periods=None, center=False, keep_attrs=None ---------- obj : DataArray Object to window. - windows : A mapping from a dimension name to window size + windows : mapping of hashable to int + A mapping from a dimension name to window size + dim : str Name of the dimension to create the rolling iterator along (e.g., `time`). @@ -217,11 +221,11 @@ def construct(self, window_dim, stride=1, fill_value=dtypes.NA): Parameters ---------- - window_dim: str + window_dim : str New name of the window dimension. - stride: integer, optional + stride : int, optional Size of stride for the rolling window. 
- fill_value: optional. Default dtypes.NA + fill_value : default: dtypes.NA Filling value to match the dimension size. Returns @@ -265,7 +269,7 @@ def reduce(self, func, **kwargs): Parameters ---------- - func : function + func : callable Function which can be called in the form `func(x, **kwargs)` to return the result of collapsing an np.ndarray over an the rolling dimension. @@ -402,17 +406,19 @@ def __init__(self, obj, windows, min_periods=None, center=False, keep_attrs=None ---------- obj : Dataset Object to window. - windows : A mapping from a dimension name to window size + windows : mapping of hashable to int + A mapping from a dimension name to window size + dim : str Name of the dimension to create the rolling iterator along (e.g., `time`). window : int Size of the moving window. - min_periods : int, default None + min_periods : int, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to setting min_periods equal to the size of the window. - center : boolean, default False + center : bool, default: False Set the labels at the center of the window. keep_attrs : bool, optional If True, the object's attributes (`attrs`) will be copied from @@ -460,7 +466,7 @@ def reduce(self, func, **kwargs): Parameters ---------- - func : function + func : callable Function which can be called in the form `func(x, **kwargs)` to return the result of collapsing an np.ndarray over an the rolling dimension. @@ -498,11 +504,11 @@ def construct(self, window_dim, stride=1, fill_value=dtypes.NA, keep_attrs=None) Parameters ---------- - window_dim: str + window_dim : str New name of the window dimension. - stride: integer, optional + stride: int, optional size of stride for the rolling window. - fill_value: optional. Default dtypes.NA + fill_value: default: dtypes.NA Filling value to match the dimension size. 
Returns @@ -556,7 +562,9 @@ def __init__(self, obj, windows, boundary, side, coord_func, keep_attrs): ---------- obj : Dataset or DataArray Object to window. - windows : A mapping from a dimension name to window size + windows : mapping of hashable to int + A mapping from a dimension name to window size + dim : str Name of the dimension to create the rolling iterator along (e.g., `time`). diff --git a/xarray/core/rolling_exp.py b/xarray/core/rolling_exp.py index 6ef63e42291..41c60fe57cd 100644 --- a/xarray/core/rolling_exp.py +++ b/xarray/core/rolling_exp.py @@ -65,14 +65,16 @@ class RollingExp: ---------- obj : Dataset or DataArray Object to window. - windows : A single mapping from a single dimension name to window value + windows : mapping of hashable to int + A single mapping from a single dimension name to window value + dim : str Name of the dimension to create the rolling exponential window along (e.g., `time`). window : int Size of the moving window. The type of this is specified in `window_type` - window_type : str, one of ['span', 'com', 'halflife', 'alpha'], default 'span' + window_type : {"span", "com", "halflife", "alpha"}, default: "span" The format of the previously supplied window. Each is a simple numerical transformation of the others. Described in detail: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.ewm.html From f8245b834cf65515e635338bed8a3338915cddaa Mon Sep 17 00:00:00 2001 From: Keewis Date: Sat, 8 Aug 2020 12:20:49 +0200 Subject: [PATCH 25/50] remove a few more workarounds for a sphinx bug --- xarray/plot/plot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xarray/plot/plot.py b/xarray/plot/plot.py index ee079f31563..9b66f15c5cc 100644 --- a/xarray/plot/plot.py +++ b/xarray/plot/plot.py @@ -267,7 +267,7 @@ def line( if None, use the default for the matplotlib function. add_legend : boolean, optional Add legend with y axis coordinates (2D inputs only). 
- ``*args``, ``**kwargs`` : optional + *args, **kwargs : optional Additional arguments to matplotlib.pyplot.plot """ # Handle facetgrids first @@ -353,7 +353,7 @@ def step(darray, *args, where="pre", drawstyle=None, ds=None, **kwargs): :py:func:`xarray.Dataset.groupby_bins`. In this case, the actual boundaries of the interval are used. - ``*args``, ``**kwargs`` : optional + *args, **kwargs : optional Additional arguments following :py:func:`xarray.plot.line` """ if where not in {"pre", "post", "mid"}: From 0e6d57b18d3a9b95d17a8f16f711b2b70483ac7d Mon Sep 17 00:00:00 2001 From: Keewis Date: Sun, 9 Aug 2020 00:52:12 +0200 Subject: [PATCH 26/50] use sphinx version 3.2 --- ci/requirements/doc.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/requirements/doc.yml b/ci/requirements/doc.yml index 658a7d4e47b..8c09359bb16 100644 --- a/ci/requirements/doc.yml +++ b/ci/requirements/doc.yml @@ -23,6 +23,6 @@ dependencies: - rasterio>=1.1 - seaborn - setuptools - - sphinx=3.1 + - sphinx=3.2 - sphinx_rtd_theme>=0.4 - zarr>=2.4 From bc7177f745d24ae6ca8120cf83033312a7a055d8 Mon Sep 17 00:00:00 2001 From: Keewis Date: Tue, 11 Aug 2020 11:34:04 +0200 Subject: [PATCH 27/50] remove a few misspellings of bool and optional --- xarray/core/common.py | 4 ++-- xarray/core/computation.py | 2 +- xarray/core/rolling.py | 2 +- xarray/core/variable.py | 6 +++--- xarray/plot/plot.py | 8 ++++---- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/xarray/core/common.py b/xarray/core/common.py index 98ae8715b12..48e3a5574f1 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -721,11 +721,11 @@ def groupby_bins( on each side to include the min or max values of x. If bins is a sequence it defines the bin edges allowing for non-uniform bin width. No extension of the range of x is done in this case. - right : boolean, default: True + right : bool, default: True Indicates whether the bins include the rightmost edge or not. 
If right == True (the default), then the bins [1,2,3,4] indicate (1,2], (2,3], (3,4]. - labels : array-like or boolean, default: None + labels : array-like or bool, default: None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, string bin labels are assigned by `pandas.cut`. diff --git a/xarray/core/computation.py b/xarray/core/computation.py index 87704a01c47..7e9f0bc6a28 100644 --- a/xarray/core/computation.py +++ b/xarray/core/computation.py @@ -848,7 +848,7 @@ def apply_ufunc( Value used in place of missing variables on Dataset inputs when the datasets do not share the exact same ``data_vars``. Required if ``dataset_join not in {'inner', 'exact'}``, otherwise ignored. - keep_attrs: bool, Optional + keep_attrs: bool, optional Whether to copy attributes from the first argument to the output. kwargs: dict, optional Optional keyword arguments passed directly on to call ``func``. diff --git a/xarray/core/rolling.py b/xarray/core/rolling.py index 97c4d9b4f0a..92cc03c6b25 100644 --- a/xarray/core/rolling.py +++ b/xarray/core/rolling.py @@ -188,7 +188,7 @@ def __init__(self, obj, windows, min_periods=None, center=False, keep_attrs=None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to setting min_periods equal to the size of the window. - center : boolean, default False + center : bool, default False Set the labels at the center of the window. keep_attrs : bool, optional If True, the object's attributes (`attrs`) will be copied from diff --git a/xarray/core/variable.py b/xarray/core/variable.py index a7ec88ee258..37f294f9003 100644 --- a/xarray/core/variable.py +++ b/xarray/core/variable.py @@ -523,14 +523,14 @@ def _broadcast_indexes(self, key): Parameters ----------- - key: int, slice, array, dict or tuple of integer, slices and arrays + key: int, slice, array-like, dict or tuple of integer, slice and array-like Any valid input for indexing. 
Returns ------- - dims: tuple + dims : tuple Dimension of the resultant variable. - indexers: IndexingTuple subclass + indexers : IndexingTuple subclass Tuple of integer, array-like, or slices to use when indexing self._data. The type of this argument indicates the type of indexing to perform, either basic, outer or vectorized. diff --git a/xarray/plot/plot.py b/xarray/plot/plot.py index 9b66f15c5cc..9041d2c63c8 100644 --- a/xarray/plot/plot.py +++ b/xarray/plot/plot.py @@ -265,7 +265,7 @@ def line( yincrease : None, True, or False, optional Should the values on the y axes be increasing from top to bottom? if None, use the default for the matplotlib function. - add_legend : boolean, optional + add_legend : bool, optional Add legend with y axis coordinates (2D inputs only). *args, **kwargs : optional Additional arguments to matplotlib.pyplot.plot @@ -494,7 +494,7 @@ def _plot2d(plotfunc): If passed, make row faceted plots on this dimension name col : string, optional If passed, make column faceted plots on this dimension name - col_wrap : integer, optional + col_wrap : int, optional Use together with ``col`` to wrap faceted plots xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional Specifies scaling for the x- and y-axes respectively @@ -506,9 +506,9 @@ def _plot2d(plotfunc): yincrease : None, True, or False, optional Should the values on the y axes be increasing from top to bottom? if None, use the default for the matplotlib function. 
- add_colorbar : Boolean, optional + add_colorbar : bool, optional Adds colorbar to axis - add_labels : Boolean, optional + add_labels : bool, optional Use xarray metadata to label axes norm : ``matplotlib.colors.Normalize`` instance, optional If the ``norm`` has vmin or vmax specified, the corresponding kwarg From be3015a81bb6791e8ca108fbfd313c032c9bb1c8 Mon Sep 17 00:00:00 2001 From: Keewis Date: Tue, 11 Aug 2020 14:00:08 +0200 Subject: [PATCH 28/50] fix more docstrings --- doc/conf.py | 2 ++ xarray/core/accessor_str.py | 2 +- xarray/core/alignment.py | 9 +++++---- xarray/core/combine.py | 2 +- xarray/core/concat.py | 2 +- xarray/core/merge.py | 4 ++-- xarray/core/utils.py | 2 +- 7 files changed, 13 insertions(+), 10 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 8ced9ae8992..5d50e0834d9 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -118,6 +118,8 @@ "dict-like": ":term:`dict-like `", "mapping": ":term:`mapping`", "file-like": ":term:`file-like `", + # special terms + "nested list": "nested :class:`list`", # stdlib type aliases "MutableMapping": "~collections.abc.MutableMapping", "sys.stdout": ":obj:`sys.stdout`", diff --git a/xarray/core/accessor_str.py b/xarray/core/accessor_str.py index d3da075c17c..b71c5e3b681 100644 --- a/xarray/core/accessor_str.py +++ b/xarray/core/accessor_str.py @@ -118,7 +118,7 @@ def get(self, i): Returns ------- - items : array of objects + items : array of object """ obj = slice(-1, None) if i == -1 else slice(i, i + 1) return self._apply(lambda x: x[obj]) diff --git a/xarray/core/alignment.py b/xarray/core/alignment.py index abc180e049c..8d982c141ec 100644 --- a/xarray/core/alignment.py +++ b/xarray/core/alignment.py @@ -108,8 +108,9 @@ def align( Returns ------- - aligned : same as `*objects` - Tuple of objects with aligned coordinates. + aligned : DataArray or Dataset + Tuple of objects with the same type as `*objects` with aligned + coordinates. 
Raises ------ @@ -664,14 +665,14 @@ def broadcast(*args, exclude=None): Parameters ---------- - *args : DataArray or Dataset objects + *args : DataArray or Dataset Arrays to broadcast against each other. exclude : sequence of str, optional Dimensions that must not be broadcasted Returns ------- - broadcast : tuple of xarray objects + broadcast : tuple of DataArray or tuple of Dataset The same data as the input arrays, but with additional dimensions inserted so that all data arrays have the same dimensions and shape. diff --git a/xarray/core/combine.py b/xarray/core/combine.py index 65c2ffb2c63..5001cc62b1a 100644 --- a/xarray/core/combine.py +++ b/xarray/core/combine.py @@ -362,7 +362,7 @@ def combine_nested( Parameters ---------- - datasets : list or nested list of xarray.Dataset objects. + datasets : list or nested list of Dataset Dataset objects to combine. If concatenation or merging along more than one dimension is desired, then datasets must be supplied in a nested list-of-lists. diff --git a/xarray/core/concat.py b/xarray/core/concat.py index b42c91c232d..144bfca96ec 100644 --- a/xarray/core/concat.py +++ b/xarray/core/concat.py @@ -23,7 +23,7 @@ def concat( Parameters ---------- - objs : sequence of Dataset and DataArray objects + objs : sequence of Dataset and DataArray xarray objects to concatenate together. Each object is expected to consist of variables and coordinates with matching shapes except for along the concatenated dimension. diff --git a/xarray/core/merge.py b/xarray/core/merge.py index 62329b2f25b..c76daeb3153 100644 --- a/xarray/core/merge.py +++ b/xarray/core/merge.py @@ -373,7 +373,7 @@ def coerce_pandas_values(objects: Iterable["CoercibleMapping"]) -> List["Dataset Parameters ---------- - objects : list of Dataset or mappings + objects : list of Dataset or mapping The mappings may contain any sort of objects coercible to xarray.Variables as keys, including pandas objects. 
@@ -412,7 +412,7 @@ def _get_priority_vars_and_indexes( Parameters ---------- - objects : list of dict-like of variables + objects : list of dict-like of Variable Dictionaries in which to find the priority variables. priority_arg : int or None Integer object whose variable should take priority. diff --git a/xarray/core/utils.py b/xarray/core/utils.py index 668405ba574..ac060215848 100644 --- a/xarray/core/utils.py +++ b/xarray/core/utils.py @@ -116,7 +116,7 @@ def multiindex_from_product_levels( ---------- levels : sequence of pd.Index Values for each MultiIndex level. - names : optional sequence of objects + names : sequence of str, optional Names for each level. Returns From 1d384b35723e65fd5b3f3d9be32a340e2f390923 Mon Sep 17 00:00:00 2001 From: Keewis Date: Tue, 11 Aug 2020 17:49:38 +0200 Subject: [PATCH 29/50] more fixes --- doc/conf.py | 8 +++++++- doc/whats-new.rst | 3 +-- xarray/core/accessor_str.py | 8 ++++---- xarray/core/common.py | 16 +++++++++------- xarray/core/dataarray.py | 6 +++--- xarray/plot/dataset_plot.py | 6 +++--- 6 files changed, 27 insertions(+), 20 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 5d50e0834d9..fdb584b6067 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -120,17 +120,23 @@ "file-like": ":term:`file-like `", # special terms "nested list": "nested :class:`list`", + # "same type as caller": "*same type as caller*", # does not work, yet + # "same type as values": "*same type as values*", # does not work, yet # stdlib type aliases "MutableMapping": "~collections.abc.MutableMapping", "sys.stdout": ":obj:`sys.stdout`", "timedelta": "~datetime.timedelta", + "string": ":class:`string `", # numpy terms "array_like": ":term:`array_like`", "array-like": ":term:`array-like `", # "scalar": ":term:`scalar`", "array": ":term:`array`", # matplotlib terms - "color-like": ":py:func:`is_color_like`", + "color-like": ":py:func:`color-like `", + "matplotlib colormap name": ":doc:matplotlib colormap name ", + "matplotlib axes object": 
":py:class:`matplotlib axes object `", + "colormap": ":py:class:`colormap `", # objects without namespace "DataArray": "~xarray.DataArray", "Dataset": "~xarray.Dataset", diff --git a/doc/whats-new.rst b/doc/whats-new.rst index 4639e258288..a5ec4560f24 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -25,14 +25,13 @@ Breaking changes New Features ~~~~~~~~~~~~ -- Build ``CFTimeIndex.__repr__`` explicitly as :py:class:`pandas.Index`. Add ``calendar`` as a new - :py:meth:`~xarray.DataArray.rolling` and :py:meth:`~xarray.Dataset.rolling` now accept more than 1 dimension.(:pull:`4219`) By `Keisuke Fujii `_. - Build :py:meth:`CFTimeIndex.__repr__` explicitly as :py:class:`pandas.Index`. Add ``calendar`` as a new property for :py:class:`CFTimeIndex` and show ``calendar`` and ``length`` in ``CFTimeIndex.__repr__`` (:issue:`2416`, :pull:`4092`) - `Aaron Spring `_. + By `Aaron Spring `_. - Use a wrapped array's ``_repr_inline_`` method to construct the collapsed ``repr`` of :py:class:`DataArray` and :py:class:`Dataset` objects and document the new method in :doc:`internals`. (:pull:`4248`). 
diff --git a/xarray/core/accessor_str.py b/xarray/core/accessor_str.py index b71c5e3b681..d44ed97d144 100644 --- a/xarray/core/accessor_str.py +++ b/xarray/core/accessor_str.py @@ -748,7 +748,7 @@ def find(self, sub, start=0, end=None, side="left"): Returns ------- - found : array of integer values + found : array of int """ sub = self._obj.dtype.type(sub) @@ -783,7 +783,7 @@ def rfind(self, sub, start=0, end=None): Returns ------- - found : array of integer values + found : array of int """ return self.find(sub, start=start, end=end, side="right") @@ -807,7 +807,7 @@ def index(self, sub, start=0, end=None, side="left"): Returns ------- - found : array of integer values + found : array of int """ sub = self._obj.dtype.type(sub) @@ -843,7 +843,7 @@ def rindex(self, sub, start=0, end=None): Returns ------- - found : array of integer values + found : array of int """ return self.index(sub, start=start, end=end, side="right") diff --git a/xarray/core/common.py b/xarray/core/common.py index 48e3a5574f1..e51e9e7a16d 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -528,7 +528,8 @@ def pipe( Returns ------- - object : the return type of ``func``. + object : Any + the return type of ``func``. Notes ----- @@ -649,7 +650,7 @@ def groupby(self, group, squeeze: bool = True, restore_coord_dims: bool = None): Returns ------- - grouped : GroupBy + grouped A `GroupBy` object patterned after `pandas.GroupBy` that can be iterated over in the form of `(unique_value, grouped_array)` pairs. @@ -743,7 +744,7 @@ def groupby_bins( Returns ------- - grouped : GroupBy + grouped A `GroupBy` object patterned after `pandas.GroupBy` that can be iterated over in the form of `(unique_value, grouped_array)` pairs. The name of the group has the added suffix `_bins` in order to @@ -1161,7 +1162,8 @@ def where(self, cond, other=dtypes.NA, drop: bool = False): Returns ------- - Same xarray type as caller, with dtype float64. 
+ DataArray or Dataset + Same xarray type as caller, with dtype float64. Examples -------- @@ -1270,8 +1272,8 @@ def isin(self, test_elements): Returns ------- - isin : same as object, bool - Has the same shape as this object. + isin : DataArray or Dataset + Has the same type and shape as this object, but with a bool dtype. Examples -------- @@ -1456,7 +1458,7 @@ def zeros_like(other, dtype: DTypeLike = None): Returns ------- - out + out : DataArray, Dataset or Variable New object of zeros with the same shape and type as other. Examples diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 01327050b32..a7e03a6e246 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -1628,14 +1628,14 @@ def expand_dims( Parameters ---------- - dim : hashable, sequence of hashable, dict, or None + dim : hashable, sequence of hashable, dict, or None, optional Dimensions to include on the new variable. If provided as str or sequence of str, then dimensions are inserted with length 1. If provided as a dict, then the keys are the new dimensions and the values are either integers (giving the length of the new dimensions) or sequence/ndarray (giving the coordinates of the new dimensions). - axis : integer, list of int or or tuple of int, or None + axis : int, list of int or tuple of int, or None, default: None Axis position(s) where new axis is to be inserted (position(s) on the result array). If a list (or tuple) of integers is passed, multiple axes are inserted. 
In this case, dim arguments should be @@ -2642,7 +2642,7 @@ def identical(self, other: "DataArray") -> bool: See Also -------- DataArray.broadcast_equals - DataArray.equal + DataArray.equals """ try: return self.name == other.name and self._all_compat(other, "identical") diff --git a/xarray/plot/dataset_plot.py b/xarray/plot/dataset_plot.py index d9fe83ab217..2f2fc3e59b8 100644 --- a/xarray/plot/dataset_plot.py +++ b/xarray/plot/dataset_plot.py @@ -191,7 +191,7 @@ def _dsplot(plotfunc): If passed, make column faceted plots on this dimension name col_wrap : int, optional Use together with ``col`` to wrap faceted plots - ax : matplotlib.axes.Axes, optional + ax : matplotlib axes object, optional If None, uses the current axis. Not applicable when using facets. subplot_kws : dict, optional Dictionary of keyword arguments for matplotlib subplots. Only applies @@ -205,14 +205,14 @@ def _dsplot(plotfunc): norm : ``matplotlib.colors.Normalize`` instance, optional If the ``norm`` has vmin or vmax specified, the corresponding kwarg must be None. - vmin, vmax : floats, optional + vmin, vmax : float, optional Values to anchor the colormap, otherwise they are inferred from the data and other keyword arguments. When a diverging dataset is inferred, setting one of these values will fix the other by symmetry around ``center``. Setting both values prevents use of a diverging colormap. If discrete levels are provided as an explicit list, both of these values are ignored. - cmap : str or matplotlib.colors.Colormap, optional + cmap : str or colormap, optional The mapping from data values to color space. Either a matplotlib colormap name or object. 
If not provided, this will be either ``viridis`` (if the function infers a sequential From 18af38e44304ac96ecf16757c3fe5f863429430c Mon Sep 17 00:00:00 2001 From: Keewis Date: Tue, 11 Aug 2020 18:17:35 +0200 Subject: [PATCH 30/50] remove the mixed markup since it is not supported by the preprocessor --- doc/conf.py | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/conf.py b/doc/conf.py index fdb584b6067..dbc0af90193 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -119,7 +119,6 @@ "mapping": ":term:`mapping`", "file-like": ":term:`file-like `", # special terms - "nested list": "nested :class:`list`", # "same type as caller": "*same type as caller*", # does not work, yet # "same type as values": "*same type as values*", # does not work, yet # stdlib type aliases From 45d4702384d11b5c2c00f8bf3655a6ce22c9b4ee Mon Sep 17 00:00:00 2001 From: Keewis Date: Tue, 11 Aug 2020 23:26:55 +0200 Subject: [PATCH 31/50] use double instead of single quotes in docstrings --- xarray/backends/api.py | 88 ++++++++++++++++----------------- xarray/core/accessor_str.py | 10 ++-- xarray/core/alignment.py | 14 +++--- xarray/core/combine.py | 98 ++++++++++++++++++------------------- xarray/core/common.py | 10 ++-- xarray/core/concat.py | 60 +++++++++++------------ xarray/core/dataarray.py | 10 ++-- xarray/core/dataset.py | 24 ++++----- xarray/core/merge.py | 48 +++++++++--------- xarray/core/ops.py | 22 ++++----- xarray/core/resample.py | 4 +- xarray/core/variable.py | 6 +-- xarray/plot/dataset_plot.py | 2 +- xarray/plot/plot.py | 10 ++-- 14 files changed, 203 insertions(+), 203 deletions(-) diff --git a/xarray/backends/api.py b/xarray/backends/api.py index bfca4ddc80c..b84a80c8232 100644 --- a/xarray/backends/api.py +++ b/xarray/backends/api.py @@ -343,11 +343,11 @@ def open_dataset( decode_coords : bool, optional If True, decode the 'coordinates' attribute to identify coordinates in the resulting dataset. 
- engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib', \ - 'pseudonetcdf'}, optional + engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib", \ + "pseudonetcdf"}, optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, with a preference for - 'netcdf4'. + "netcdf4". chunks : int or dict, optional If chunks is provided, it used to load the new dataset into dask arrays. ``chunks={}`` loads the dataset with dask using a single @@ -374,7 +374,7 @@ def open_dataset( allow user control of dataset processing. use_cftime: bool, optional Only relevant if encoded dates come from a standard calendar - (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not + (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to @@ -384,7 +384,7 @@ def open_dataset( raise an error. decode_timedelta : bool, optional If True, decode variables and coordinates with time units in - {'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'} + {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of decode_time. @@ -610,11 +610,11 @@ def open_dataarray( decode_coords : bool, optional If True, decode the 'coordinates' attribute to identify coordinates in the resulting dataset. - engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \ + engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib"}, \ optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, with a preference for - 'netcdf4'. + "netcdf4". 
chunks : int or dict, optional If chunks is provided, it used to load the new dataset into dask arrays. @@ -640,7 +640,7 @@ def open_dataarray( allow user control of dataset processing. use_cftime: bool, optional Only relevant if encoded dates come from a standard calendar - (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not + (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to @@ -650,7 +650,7 @@ def open_dataarray( raise an error. decode_timedelta : bool, optional If True, decode variables and coordinates with time units in - {'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'} + {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of decode_time. @@ -772,77 +772,77 @@ def open_mfdataset( particular dimension. Default is None, which for a 1D list of filepaths is equivalent to opening the files separately and then merging them with ``xarray.merge``. - combine : {'by_coords', 'nested'}, optional + combine : {"by_coords", "nested"}, optional Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to combine all the data. Default is to use ``xarray.combine_by_coords``. - compat : {'identical', 'equals', 'broadcast_equals', \ - 'no_conflicts', 'override'}, optional + compat : {"identical", "equals", "broadcast_equals", \ + "no_conflicts", "override"}, optional String indicating how to compare variables of the same name for potential conflicts when merging: - * 'broadcast_equals': all values must be equal when variables are + * "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - * 'equals': all values and dimensions must be the same. 
- * 'identical': all values, dimensions and attributes must be the + * "equals": all values and dimensions must be the same. + * "identical": all values, dimensions and attributes must be the same. - * 'no_conflicts': only values which are not null in both datasets + * "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - * 'override': skip comparing and pick variable from first dataset + * "override": skip comparing and pick variable from first dataset preprocess : callable, optional If provided, call this function on each dataset prior to concatenation. You can find the file-name from which each dataset was loaded in - ``ds.encoding['source']``. - engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \ + ``ds.encoding["source"]``. + engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib"}, \ optional Engine to use when reading files. If not provided, the default engine is chosen based on available dependencies, with a preference for - 'netcdf4'. + "netcdf4". lock : False or lock-like, optional Resource lock to use when reading data from disk. Only relevant when using dask or another form of parallelism. By default, appropriate locks are chosen to safely read and write files with the currently active dask scheduler. - data_vars : {'minimal', 'different', 'all'} or list of str, optional + data_vars : {"minimal", "different", "all"} or list of str, optional These data variables will be concatenated together: - * 'minimal': Only data variables in which the dimension already + * "minimal": Only data variables in which the dimension already appears are included. - * 'different': Data variables which are not equal (ignoring + * "different": Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). 
Beware: this option may load the data payload of data variables into memory if they are not already loaded. - * 'all': All data variables will be concatenated. + * "all": All data variables will be concatenated. * list of str: The listed data variables will be concatenated, in - addition to the 'minimal' data variables. - coords : {'minimal', 'different', 'all'} or list of str, optional + addition to the "minimal" data variables. + coords : {"minimal", "different", "all"} or list of str, optional These coordinate variables will be concatenated together: - * 'minimal': Only coordinates in which the dimension already appears + * "minimal": Only coordinates in which the dimension already appears are included. - * 'different': Coordinates which are not equal (ignoring attributes) + * "different": Coordinates which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of coordinate variables into memory if they are not already loaded. - * 'all': All coordinate variables will be concatenated, except + * "all": All coordinate variables will be concatenated, except those corresponding to other dimensions. * list of str: The listed coordinate variables will be concatenated, - in addition the 'minimal' coordinates. + in addition the "minimal" coordinates. parallel : bool, optional If True, the open and preprocess steps of this function will be performed in parallel using ``dask.delayed``. Default is False. 
- join : {'outer', 'inner', 'left', 'right', 'exact, 'override'}, optional
+ join : {"outer", "inner", "left", "right", "exact", "override"}, optional
String indicating how to combine differing indexes
(excluding concat_dim) in objects

- - 'outer': use the union of object indexes
- - 'inner': use the intersection of object indexes
- - 'left': use indexes from the first object with each dimension
- - 'right': use indexes from the last object with each dimension
- - 'exact': instead of aligning, raise `ValueError` when indexes to be
+ - "outer": use the union of object indexes
+ - "inner": use the intersection of object indexes
+ - "left": use indexes from the first object with each dimension
+ - "right": use indexes from the last object with each dimension
+ - "exact": instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- - 'override': if indexes are of same size, rewrite indexes to be
+ - "override": if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the
same dimension must have the same size in all objects.
attrs_file : str or pathlib.Path, optional
@@ -1146,11 +1146,11 @@ def save_mfdataset(
List of datasets to save.
paths : list of str or list of Path
List of paths to which to save each corresponding dataset.
- mode : {'w', 'a'}, optional
- Write ('w') or append ('a') mode. If mode='w', any existing file at
+ mode : {"w", "a"}, optional
+ Write ("w") or append ("a") mode. If mode="w", any existing file at
these locations will be overwritten.
- format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', \
- 'NETCDF3_CLASSIC'}, optional
+ format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \
+ "NETCDF3_CLASSIC"}, optional

        File format for the resulting netCDF file:
@@ -1173,12 +1173,12 @@ def save_mfdataset(
NETCDF3_64BIT format (scipy does not support netCDF4). 
groups : list of str, optional Paths to the netCDF4 group in each corresponding file to which to save - datasets (only works for format='NETCDF4'). The groups will be created + datasets (only works for format="NETCDF4"). The groups will be created if necessary. - engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional + engine : {"netcdf4", "scipy", "h5netcdf"}, optional Engine to use when writing netCDF files. If not provided, the default engine is chosen based on available dependencies, with a - preference for 'netcdf4' if writing to a file on disk. + preference for "netcdf4" if writing to a file on disk. See `Dataset.to_netcdf` for additional information. compute : bool If true compute immediately, otherwise return a diff --git a/xarray/core/accessor_str.py b/xarray/core/accessor_str.py index d44ed97d144..ccaa7a56380 100644 --- a/xarray/core/accessor_str.py +++ b/xarray/core/accessor_str.py @@ -404,9 +404,9 @@ def pad(self, width, side="left", fillchar=" "): width : int Minimum width of resulting string; additional characters will be filled with character defined in `fillchar`. - side : {'left', 'right', 'both'}, default: 'left' + side : {"left", "right", "both"}, default: "left" Side from which to fill resulting string. - fillchar : str, default: ' ' + fillchar : str, default: " " Additional character for filling, default is whitespace. Returns @@ -591,7 +591,7 @@ def strip(self, to_strip=None, side="both"): Specifying the set of characters to be removed. All combinations of this set of characters will be stripped. If None then whitespaces are removed. - side : {'left', 'right', 'both'}, default: 'left' + side : {"left", "right", "both"}, default: "left" Side from which to strip. Returns @@ -743,7 +743,7 @@ def find(self, sub, start=0, end=None, side="left"): Left edge index end : int Right edge index - side : {'left', 'right'}, default: 'left' + side : {"left", "right"}, default: "left" Starting side for search. 
Returns @@ -802,7 +802,7 @@ def index(self, sub, start=0, end=None, side="left"): Left edge index end : int Right edge index - side : {'left', 'right'}, default: 'left' + side : {"left", "right"}, default: "left" Starting side for search. Returns diff --git a/xarray/core/alignment.py b/xarray/core/alignment.py index 8d982c141ec..a7fcdc280ff 100644 --- a/xarray/core/alignment.py +++ b/xarray/core/alignment.py @@ -80,17 +80,17 @@ def align( ---------- *objects : Dataset or DataArray Objects to align. - join : {'outer', 'inner', 'left', 'right', 'exact', 'override'}, optional + join : {"outer", "inner", "left", "right", "exact", "override"}, optional Method for joining the indexes of the passed objects along each dimension: - - 'outer': use the union of object indexes - - 'inner': use the intersection of object indexes - - 'left': use indexes from the first object with each dimension - - 'right': use indexes from the last object with each dimension - - 'exact': instead of aligning, raise `ValueError` when indexes to be + - "outer": use the union of object indexes + - "inner": use the intersection of object indexes + - "left": use indexes from the first object with each dimension + - "right": use indexes from the last object with each dimension + - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - - 'override': if indexes are of same size, rewrite indexes to be + - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. copy : bool, optional diff --git a/xarray/core/combine.py b/xarray/core/combine.py index 5001cc62b1a..ed582cc563f 100644 --- a/xarray/core/combine.py +++ b/xarray/core/combine.py @@ -375,48 +375,48 @@ def combine_nested( nested-list input along which to merge. Must be the same length as the depth of the list passed to ``datasets``. 
- compat : {'identical', 'equals', 'broadcast_equals', \ - 'no_conflicts', 'override'}, optional + compat : {"identical", "equals", "broadcast_equals", \ + "no_conflicts", "override"}, optional String indicating how to compare variables of the same name for potential merge conflicts: - - 'broadcast_equals': all values must be equal when variables are + - "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - - 'equals': all values and dimensions must be the same. - - 'identical': all values, dimensions and attributes must be the + - "equals": all values and dimensions must be the same. + - "identical": all values, dimensions and attributes must be the same. - - 'no_conflicts': only values which are not null in both datasets + - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - - 'override': skip comparing and pick variable from first dataset - data_vars : {'minimal', 'different', 'all' or list of str}, optional + - "override": skip comparing and pick variable from first dataset + data_vars : {"minimal", "different", "all" or list of str}, optional Details are in the documentation of concat - coords : {'minimal', 'different', 'all' or list of str}, optional + coords : {"minimal", "different", "all" or list of str}, optional Details are in the documentation of concat fill_value : scalar, optional Value to use for newly missing values - join : {'outer', 'inner', 'left', 'right', 'exact'}, optional + join : {"outer", "inner", "left", "right", "exact"}, optional String indicating how to combine differing indexes (excluding concat_dim) in objects - - 'outer': use the union of object indexes - - 'inner': use the intersection of object indexes - - 'left': use indexes from the first object with each dimension - - 'right': use indexes from the last object with each dimension - - 'exact': instead of 
aligning, raise `ValueError` when indexes to be + - "outer": use the union of object indexes + - "inner": use the intersection of object indexes + - "left": use indexes from the first object with each dimension + - "right": use indexes from the last object with each dimension + - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - - 'override': if indexes are of same size, rewrite indexes to be + - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. - combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'}, - default 'drop' + combine_attrs : {"drop", "identical", "no_conflicts", "override"}, \ + default: "drop" String indicating how to combine attrs of the objects being merged: - - 'drop': empty attrs on returned Dataset. - - 'identical': all attrs must be the same on every object. - - 'no_conflicts': attrs from all objects are combined, any that have + - "drop": empty attrs on returned Dataset. + - "identical": all attrs must be the same on every object. + - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - - 'override': skip comparing and copy attrs from the first dataset to + - "override": skip comparing and copy attrs from the first dataset to the result. Returns @@ -541,61 +541,61 @@ def combine_by_coords( ---------- datasets : sequence of xarray.Dataset Dataset objects to combine. 
- compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional + compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional String indicating how to compare variables of the same name for potential conflicts: - - 'broadcast_equals': all values must be equal when variables are + - "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - - 'equals': all values and dimensions must be the same. - - 'identical': all values, dimensions and attributes must be the + - "equals": all values and dimensions must be the same. + - "identical": all values, dimensions and attributes must be the same. - - 'no_conflicts': only values which are not null in both datasets + - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - - 'override': skip comparing and pick variable from first dataset - data_vars : {'minimal', 'different', 'all' or list of str}, optional + - "override": skip comparing and pick variable from first dataset + data_vars : {"minimal", "different", "all" or list of str}, optional These data variables will be concatenated together: - * 'minimal': Only data variables in which the dimension already + * "minimal": Only data variables in which the dimension already appears are included. - * 'different': Data variables which are not equal (ignoring + * "different": Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. - * 'all': All data variables will be concatenated. + * "all": All data variables will be concatenated. * list of str: The listed data variables will be concatenated, in - addition to the 'minimal' data variables. 
+ addition to the "minimal" data variables. - If objects are DataArrays, `data_vars` must be 'all'. - coords : {'minimal', 'different', 'all' or list of str}, optional - As per the 'data_vars' kwarg, but for coordinate variables. + If objects are DataArrays, `data_vars` must be "all". + coords : {"minimal", "different", "all" or list of str}, optional + As per the "data_vars" kwarg, but for coordinate variables. fill_value : scalar, optional Value to use for newly missing values. If None, raises a ValueError if the passed Datasets do not create a complete hypercube. - join : {'outer', 'inner', 'left', 'right', 'exact'}, optional + join : {"outer", "inner", "left", "right", "exact"}, optional String indicating how to combine differing indexes (excluding concat_dim) in objects - - 'outer': use the union of object indexes - - 'inner': use the intersection of object indexes - - 'left': use indexes from the first object with each dimension - - 'right': use indexes from the last object with each dimension - - 'exact': instead of aligning, raise `ValueError` when indexes to be + - "outer": use the union of object indexes + - "inner": use the intersection of object indexes + - "left": use indexes from the first object with each dimension + - "right": use indexes from the last object with each dimension + - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - - 'override': if indexes are of same size, rewrite indexes to be + - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. - combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'}, - default 'drop' + combine_attrs : {"drop", "identical", "no_conflicts", "override"}, \ + default: "drop" String indicating how to combine attrs of the objects being merged: - - 'drop': empty attrs on returned Dataset. 
- - 'identical': all attrs must be the same on every object. - - 'no_conflicts': attrs from all objects are combined, any that have + - "drop": empty attrs on returned Dataset. + - "identical": all attrs must be the same on every object. + - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - - 'override': skip comparing and copy attrs from the first dataset to + - "override": skip comparing and copy attrs from the first dataset to the result. Returns diff --git a/xarray/core/common.py b/xarray/core/common.py index e51e9e7a16d..656ccbe5031 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -890,7 +890,7 @@ def rolling_exp( window : int Size of the moving window. The type of this is specified in `window_type` - window_type : {'span', 'com', 'halflife', 'alpha'}, default: 'span' + window_type : {"span", "com", "halflife", "alpha"}, default: "span" The format of the previously supplied window. Each is a simple numerical transformation of the others. Described in detail: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.ewm.html @@ -932,7 +932,7 @@ def coarsen( If 'exact', a ValueError will be raised if dimension size is not a multiple of the window size. If 'trim', the excess entries are dropped. If 'pad', NA will be padded. - side : {'left', 'right'} or mapping of str to {"left", "right"} + side : {"left", "right"} or mapping of str to {"left", "right"} coord_func : str or mapping of hashable to str, default: "mean" function (name) that is applied to the coordinates, or a mapping from coordinate name to function (name). @@ -1014,13 +1014,13 @@ def resample( dimension must be datetime-like. skipna : bool, optional Whether to skip missing values when aggregating in downsampling. - closed : {'left', 'right'}, optional + closed : {"left", "right"}, optional Side of each interval to treat as closed. 
- label : {'left', 'right'}, optional + label : {"left", "right"}, optional Side of each interval to use for labeling. base : int, optional For frequencies that evenly subdivide 1 day, the "origin" of the - aggregated intervals. For example, for '24H' frequency, base could + aggregated intervals. For example, for "24H" frequency, base could range from 0 through 23. loffset : timedelta or str, optional Offset used to adjust the resampled time labels. Some pandas date diff --git a/xarray/core/concat.py b/xarray/core/concat.py index 144bfca96ec..b389e2a85e8 100644 --- a/xarray/core/concat.py +++ b/xarray/core/concat.py @@ -34,74 +34,74 @@ def concat( unchanged. If dimension is provided as a DataArray or Index, its name is used as the dimension to concatenate along and the values are added as a coordinate. - data_vars : {'minimal', 'different', 'all' or list of str}, optional + data_vars : {"minimal", "different", "all"} or list of str, optional These data variables will be concatenated together: - * 'minimal': Only data variables in which the dimension already + * "minimal": Only data variables in which the dimension already appears are included. - * 'different': Data variables which are not equal (ignoring + * "different": Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. - * 'all': All data variables will be concatenated. + * "all": All data variables will be concatenated. * list of str: The listed data variables will be concatenated, in - addition to the 'minimal' data variables. + addition to the "minimal" data variables. - If objects are DataArrays, data_vars must be 'all'. - coords : {'minimal', 'different', 'all' or list of str}, optional + If objects are DataArrays, data_vars must be "all". 
+ coords : {"minimal", "different", "all"} or list of str, optional These coordinate variables will be concatenated together: - * 'minimal': Only coordinates in which the dimension already appears + * "minimal": Only coordinates in which the dimension already appears are included. - * 'different': Coordinates which are not equal (ignoring attributes) + * "different": Coordinates which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of coordinate variables into memory if they are not already loaded. - * 'all': All coordinate variables will be concatenated, except + * "all": All coordinate variables will be concatenated, except those corresponding to other dimensions. * list of str: The listed coordinate variables will be concatenated, - in addition to the 'minimal' coordinates. - compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional + in addition to the "minimal" coordinates. + compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional String indicating how to compare non-concatenated variables of the same name for potential conflicts. This is passed down to merge. - - 'broadcast_equals': all values must be equal when variables are + - "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - - 'equals': all values and dimensions must be the same. - - 'identical': all values, dimensions and attributes must be the + - "equals": all values and dimensions must be the same. + - "identical": all values, dimensions and attributes must be the same. - - 'no_conflicts': only values which are not null in both datasets + - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. 
- - 'override': skip comparing and pick variable from first dataset + - "override": skip comparing and pick variable from first dataset positions : None or list of integer arrays, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. fill_value : scalar, optional Value to use for newly missing values - join : {'outer', 'inner', 'left', 'right', 'exact'}, optional + join : {"outer", "inner", "left", "right", "exact"}, optional String indicating how to combine differing indexes (excluding dim) in objects - - 'outer': use the union of object indexes - - 'inner': use the intersection of object indexes - - 'left': use indexes from the first object with each dimension - - 'right': use indexes from the last object with each dimension - - 'exact': instead of aligning, raise `ValueError` when indexes to be + - "outer": use the union of object indexes + - "inner": use the intersection of object indexes + - "left": use indexes from the first object with each dimension + - "right": use indexes from the last object with each dimension + - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - - 'override': if indexes are of same size, rewrite indexes to be + - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. - combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'}, - default 'override + combine_attrs : {"drop", "identical", "no_conflicts", "override"}, \ + default: "override String indicating how to combine attrs of the objects being merged: - - 'drop': empty attrs on returned Dataset. - - 'identical': all attrs must be the same on every object. 
- - 'no_conflicts': attrs from all objects are combined, any that have + - "drop": empty attrs on returned Dataset. + - "identical": all attrs must be the same on every object. + - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - - 'override': skip comparing and copy attrs from the first dataset to + - "override": skip comparing and copy attrs from the first dataset to the result. Returns diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index a7e03a6e246..b9a9b017efd 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -3243,8 +3243,8 @@ def integrate( ---------- dim : hashable, or sequence of hashable Coordinate(s) used for the integration. - datetime_unit : {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \ - 'ps', 'fs', 'as'}, optional + datetime_unit : {"Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", \ + "ps", "fs", "as"}, optional Can be used to specify the unit if datetime coordinate is used. Returns @@ -3558,10 +3558,10 @@ def pad( ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all axes. Default is 0. - reflect_type : {'even', 'odd'}, optional - Used in 'reflect', and 'symmetric'. The 'even' style is the + reflect_type : {"even", "odd"}, optional + Used in "reflect", and "symmetric". The "even" style is the default with an unaltered reflection around the edge value. For - the 'odd' style, the extended part of the array is created by + the "odd" style, the extended part of the array is created by subtracting the reflected values from two times the edge value. **pad_width_kwargs The keyword arguments form of ``pad_width``. diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index e653f7d4a8d..95910a85156 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -1589,12 +1589,12 @@ def to_zarr( ---------- store : MutableMapping, str or Path, optional Store or path to directory in file system. 
- mode : {'w', 'w-', 'a', None}, optional - Persistence mode: 'w' means create (overwrite if exists); - 'w-' means create (fail if exists); - 'a' means override existing variables (create if does not exist). + mode : {"w", "w-", "a", None}, optional + Persistence mode: "w" means create (overwrite if exists); + "w-" means create (fail if exists); + "a" means override existing variables (create if does not exist). If ``append_dim`` is set, ``mode`` can be omitted as it is - internally set to ``'a'``. Otherwise, ``mode`` will default to + internally set to ``"a"``. Otherwise, ``mode`` will default to `w-` if not set. synchronizer : object, optional Array synchronizer @@ -1603,7 +1603,7 @@ def to_zarr( encoding : dict, optional Nested dictionary with variable names as keys and dictionaries of variable specific encodings as values, e.g., - ``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,}, ...}`` + ``{"my_variable": {"dtype": "int16", "scale_factor": 0.1,}, ...}`` compute: bool, optional If True compute immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed later. @@ -5503,13 +5503,13 @@ def integrate(self, coord, datetime_unit=None): ---------- coord: str, or sequence of str Coordinate(s) used for the integration. - datetime_unit : {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \ - 'ps', 'fs', 'as'} + datetime_unit : {"Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", \ + "ps", "fs", "as"}, optional Can be specify the unit if datetime coordinate is used. Returns ------- - integrated: Dataset + integrated : Dataset See also -------- @@ -6129,10 +6129,10 @@ def pad( ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all axes. Default is 0. - reflect_type : {'even', 'odd'}, optional - Used in 'reflect', and 'symmetric'. The 'even' style is the + reflect_type : {"even", "odd"}, optional + Used in "reflect", and "symmetric". 
The "even" style is the default with an unaltered reflection around the edge value. For - the 'odd' style, the extended part of the array is created by + the "odd" style, the extended part of the array is created by subtracting the reflected values from two times the edge value. **pad_width_kwargs The keyword arguments form of ``pad_width``. diff --git a/xarray/core/merge.py b/xarray/core/merge.py index c76daeb3153..2a837295472 100644 --- a/xarray/core/merge.py +++ b/xarray/core/merge.py @@ -93,7 +93,7 @@ def unique_variable( variables : list of Variable List of Variable objects, all of which go by the same name in different inputs. - compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional + compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional Type of equality check to use. equals : None or bool, optional corresponding to result of compat test @@ -416,7 +416,7 @@ def _get_priority_vars_and_indexes( Dictionaries in which to find the priority variables. priority_arg : int or None Integer object whose variable should take priority. - compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional + compat : {"identical", "equals", "broadcast_equals", "no_conflicts"}, optional Compatibility checks to use when merging variables. Returns @@ -554,11 +554,11 @@ def merge_core( ---------- objects : list of mapping All values must be convertable to labeled arrays. - compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional + compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional Compatibility checks to use when merging variables. - join : {'outer', 'inner', 'left', 'right'}, optional + join : {"outer", "inner", "left", "right"}, optional How to combine objects with different indexes. 
- combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'}, optional + combine_attrs : {"drop", "identical", "no_conflicts", "override"}, optional How to combine attributes of objects priority_arg : int, optional Optional argument in `objects` that takes precedence over the others. @@ -641,42 +641,42 @@ def merge( objects : iterable of Dataset or iterable of DataArray or iterable of dict-like Merge together all variables from these objects. If any of them are DataArray objects, they must have a name. - compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional + compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional String indicating how to compare variables of the same name for potential conflicts: - - 'broadcast_equals': all values must be equal when variables are + - "broadcast_equals": all values must be equal when variables are broadcast against each other to ensure common dimensions. - - 'equals': all values and dimensions must be the same. - - 'identical': all values, dimensions and attributes must be the + - "equals": all values and dimensions must be the same. + - "identical": all values, dimensions and attributes must be the same. - - 'no_conflicts': only values which are not null in both datasets + - "no_conflicts": only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. - - 'override': skip comparing and pick variable from first dataset - join : {'outer', 'inner', 'left', 'right', 'exact'}, optional + - "override": skip comparing and pick variable from first dataset + join : {"outer", "inner", "left", "right", "exact"}, optional String indicating how to combine differing indexes in objects. 
- - 'outer': use the union of object indexes - - 'inner': use the intersection of object indexes - - 'left': use indexes from the first object with each dimension - - 'right': use indexes from the last object with each dimension - - 'exact': instead of aligning, raise `ValueError` when indexes to be + - "outer": use the union of object indexes + - "inner": use the intersection of object indexes + - "left": use indexes from the first object with each dimension + - "right": use indexes from the last object with each dimension + - "exact": instead of aligning, raise `ValueError` when indexes to be aligned are not equal - - 'override': if indexes are of same size, rewrite indexes to be + - "override": if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. fill_value : scalar, optional Value to use for newly missing values - combine_attrs : {'drop', 'identical', 'no_conflicts', 'override'}, \ - default: 'drop' + combine_attrs : {"drop", "identical", "no_conflicts", "override"}, \ + default: "drop" String indicating how to combine attrs of the objects being merged: - - 'drop': empty attrs on returned Dataset. - - 'identical': all attrs must be the same on every object. - - 'no_conflicts': attrs from all objects are combined, any that have + - "drop": empty attrs on returned Dataset. + - "identical": all attrs must be the same on every object. + - "no_conflicts": attrs from all objects are combined, any that have the same name must also have the same value. - - 'override': skip comparing and copy attrs from the first dataset to + - "override": skip comparing and copy attrs from the first dataset to the result. 
Returns diff --git a/xarray/core/ops.py b/xarray/core/ops.py index 3675317977f..7c4d147c1f0 100644 --- a/xarray/core/ops.py +++ b/xarray/core/ops.py @@ -140,22 +140,22 @@ def fillna(data, other, join="left", dataset_join="left"): Parameters ---------- - join : {'outer', 'inner', 'left', 'right'}, optional + join : {"outer", "inner", "left", "right"}, optional Method for joining the indexes of the passed objects along each dimension - - 'outer': use the union of object indexes - - 'inner': use the intersection of object indexes - - 'left': use indexes from the first object with each dimension - - 'right': use indexes from the last object with each dimension - - 'exact': raise `ValueError` instead of aligning when indexes to be + - "outer": use the union of object indexes + - "inner": use the intersection of object indexes + - "left": use indexes from the first object with each dimension + - "right": use indexes from the last object with each dimension + - "exact": raise `ValueError` instead of aligning when indexes to be aligned are not equal - dataset_join : {'outer', 'inner', 'left', 'right'}, optional + dataset_join : {"outer", "inner", "left", "right"}, optional Method for joining variables of Dataset objects with mismatched data variables. 
- - 'outer': take variables from both Dataset objects - - 'inner': take only overlapped variables - - 'left': take only variables from the first object - - 'right': take only variables from the last object + - "outer": take variables from both Dataset objects + - "inner": take only overlapped variables + - "left": take only variables from the first object + - "right": take only variables from the last object """ from .computation import apply_ufunc diff --git a/xarray/core/resample.py b/xarray/core/resample.py index 08e384da1d3..af9711a3cc3 100644 --- a/xarray/core/resample.py +++ b/xarray/core/resample.py @@ -29,8 +29,8 @@ def _upsample(self, method, *args, **kwargs): Parameters ---------- - method : {'asfreq', 'pad', 'ffill', 'backfill', 'bfill', 'nearest', \ - 'interpolate'} + method : {"asfreq", "pad", "ffill", "backfill", "bfill", "nearest", \ + "interpolate"} Method to use for up-sampling See Also diff --git a/xarray/core/variable.py b/xarray/core/variable.py index 37f294f9003..a9567e80ce4 100644 --- a/xarray/core/variable.py +++ b/xarray/core/variable.py @@ -1209,10 +1209,10 @@ def pad( end_values : scalar, tuple or mapping of hashable to tuple Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. - reflect_type : {'even', 'odd'}, optional - Used in 'reflect', and 'symmetric'. The 'even' style is the + reflect_type : {"even", "odd"}, optional + Used in "reflect", and "symmetric". The "even" style is the default with an unaltered reflection around the edge value. For - the 'odd' style, the extended part of the array is created by + the "odd" style, the extended part of the array is created by subtracting the reflected values from two times the edge value. **pad_width_kwargs One of pad_width or pad_width_kwargs must be provided. 
diff --git a/xarray/plot/dataset_plot.py b/xarray/plot/dataset_plot.py index 2f2fc3e59b8..51ceff170cb 100644 --- a/xarray/plot/dataset_plot.py +++ b/xarray/plot/dataset_plot.py @@ -231,7 +231,7 @@ def _dsplot(plotfunc): robust : bool, optional If True and ``vmin`` or ``vmax`` are absent, the colormap range is computed with 2nd and 98th percentiles instead of the extreme values. - extend : {'neither', 'both', 'min', 'max'}, optional + extend : {"neither", "both", "min", "max"}, optional How to draw arrows extending the colorbar beyond its limits. If not provided, extend is inferred from vmin, vmax and the data limits. levels : int or list-like object, optional diff --git a/xarray/plot/plot.py b/xarray/plot/plot.py index 9041d2c63c8..2f5c3a1b366 100644 --- a/xarray/plot/plot.py +++ b/xarray/plot/plot.py @@ -337,16 +337,16 @@ def step(darray, *args, where="pre", drawstyle=None, ds=None, **kwargs): Parameters ---------- - where : {'pre', 'post', 'mid'}, optional, default 'pre' + where : {"pre", "post", "mid"}, default: "pre" Define where the steps should be placed: - - 'pre': The y value is continued constantly to the left from + - "pre": The y value is continued constantly to the left from every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the value ``y[i]``. - - 'post': The y value is continued constantly to the right from + - "post": The y value is continued constantly to the right from every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the value ``y[i]``. - - 'mid': Steps occur half-way between the *x* positions. + - "mid": Steps occur half-way between the *x* positions. Note that this parameter is ignored if one coordinate consists of :py:func:`pandas.Interval` values, e.g. as a result of @@ -537,7 +537,7 @@ def _plot2d(plotfunc): robust : bool, optional If True and ``vmin`` or ``vmax`` are absent, the colormap range is computed with 2nd and 98th percentiles instead of the extreme values. 
- extend : {'neither', 'both', 'min', 'max'}, optional + extend : {"neither", "both", "min", "max"}, optional How to draw arrows extending the colorbar beyond its limits. If not provided, extend is inferred from vmin, vmax and the data limits. levels : int or list-like object, optional From 373fd3fea4b4836a70e59caaee6c7763f1d9d172 Mon Sep 17 00:00:00 2001 From: Keewis Date: Tue, 11 Aug 2020 23:51:17 +0200 Subject: [PATCH 32/50] make sure the standard default notation is used everywhere --- xarray/coding/cftime_offsets.py | 14 +++++++------- xarray/coding/cftimeindex.py | 2 +- xarray/core/accessor_str.py | 6 +++--- xarray/core/dataarray.py | 4 ++-- xarray/core/ops.py | 2 +- xarray/core/rolling.py | 4 ++-- xarray/plot/plot.py | 6 ++---- xarray/tests/test_duck_array_ops.py | 2 +- 8 files changed, 19 insertions(+), 21 deletions(-) diff --git a/xarray/coding/cftime_offsets.py b/xarray/coding/cftime_offsets.py index a2306331ca7..a1521b27ad2 100644 --- a/xarray/coding/cftime_offsets.py +++ b/xarray/coding/cftime_offsets.py @@ -797,17 +797,17 @@ def cftime_range( Right bound for generating dates. periods : integer, optional Number of periods to generate. - freq : str, default 'D', BaseCFTimeOffset, or None - Frequency strings can have multiples, e.g. '5H'. - normalize : bool, default False + freq : str, default "D", BaseCFTimeOffset, or None + Frequency strings can have multiples, e.g. "5H". + normalize : bool, default: False Normalize start/end dates to midnight before generating date range. - name : str, default None + name : str, default: None Name of the resulting index - closed : {None, 'left', 'right'}, optional + closed : {None, "left", "right"}, optional Make the interval closed with respect to the given frequency to the - 'left', 'right', or both sides (None, the default). + "left", "right", or both sides (None, the default). calendar : str - Calendar type for the datetimes (default 'standard'). + Calendar type for the datetimes (default "standard"). 
Returns ------- diff --git a/xarray/coding/cftimeindex.py b/xarray/coding/cftimeindex.py index b843b8f3c48..7a57b7c2dcd 100644 --- a/xarray/coding/cftimeindex.py +++ b/xarray/coding/cftimeindex.py @@ -272,7 +272,7 @@ class CFTimeIndex(pd.Index): ---------- data : array or CFTimeIndex Sequence of cftime.datetime objects to use in index - name : str, default None + name : str, default: None Name of the resulting index See Also diff --git a/xarray/core/accessor_str.py b/xarray/core/accessor_str.py index ccaa7a56380..f9bab49f308 100644 --- a/xarray/core/accessor_str.py +++ b/xarray/core/accessor_str.py @@ -440,7 +440,7 @@ def center(self, width, fillchar=" "): width : int Minimum width of resulting string; additional characters will be filled with ``fillchar`` - fillchar : str + fillchar : str, default: " " Additional character for filling, default is whitespace Returns @@ -459,7 +459,7 @@ def ljust(self, width, fillchar=" "): width : int Minimum width of resulting string; additional characters will be filled with ``fillchar`` - fillchar : str + fillchar : str, default: " " Additional character for filling, default is whitespace Returns @@ -477,7 +477,7 @@ def rjust(self, width, fillchar=" "): width : int Minimum width of resulting string; additional characters will be filled with ``fillchar`` - fillchar : str + fillchar : str, default: " " Additional character for filling, default is whitespace Returns diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index b9a9b017efd..1a4bf2b3007 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -492,7 +492,7 @@ def to_dataset( name : hashable, optional Name to substitute for this array's name. Only valid if ``dim`` is not provided. - promote_attrs : bool, default False + promote_attrs : bool, default: False Set to True to shallow copy attrs of DataArray to returned Dataset. 
Returns @@ -2206,7 +2206,7 @@ def interpolate_na( * x (x) int64 0 1 2 3 4 5 6 7 8 The gap lengths are 3-0 = 3; 6-3 = 3; and 8-6 = 2 respectively - keep_attrs : bool, default True + keep_attrs : bool, default: True If True, the dataarray's attributes (`attrs`) will be copied from the original object to the new one. If False, the new object will be returned without attributes. diff --git a/xarray/core/ops.py b/xarray/core/ops.py index 7c4d147c1f0..9dd9ee24ccd 100644 --- a/xarray/core/ops.py +++ b/xarray/core/ops.py @@ -114,7 +114,7 @@ implemented (object, datetime64 or timedelta64).""" _MINCOUNT_DOCSTRING = """ -min_count : int, default None +min_count : int, default: None The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. New in version 0.10.8: Added with the default being None.""" diff --git a/xarray/core/rolling.py b/xarray/core/rolling.py index 92cc03c6b25..4d51e63f831 100644 --- a/xarray/core/rolling.py +++ b/xarray/core/rolling.py @@ -184,11 +184,11 @@ def __init__(self, obj, windows, min_periods=None, center=False, keep_attrs=None along (e.g., `time`). window : int Size of the moving window. - min_periods : int, default None + min_periods : int, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to setting min_periods equal to the size of the window. - center : bool, default False + center : bool, default: False Set the labels at the center of the window. 
keep_attrs : bool, optional If True, the object's attributes (`attrs`) will be copied from diff --git a/xarray/plot/plot.py b/xarray/plot/plot.py index 2f5c3a1b366..305405d4e5a 100644 --- a/xarray/plot/plot.py +++ b/xarray/plot/plot.py @@ -720,9 +720,7 @@ def newplotfunc( if "imshow" == plotfunc.__name__ and isinstance(aspect, str): # forbid usage of mpl strings - raise ValueError( - "plt.imshow's `aspect` kwarg is not available " "in xarray" - ) + raise ValueError("plt.imshow's `aspect` kwarg is not available in xarray") if subplot_kws is None: subplot_kws = dict() @@ -753,7 +751,7 @@ def newplotfunc( elif cbar_ax is not None or cbar_kwargs: # inform the user about keywords which aren't used raise ValueError( - "cbar_ax and cbar_kwargs can't be used with " "add_colorbar=False." + "cbar_ax and cbar_kwargs can't be used with add_colorbar=False." ) # origin kwarg overrides yincrease diff --git a/xarray/tests/test_duck_array_ops.py b/xarray/tests/test_duck_array_ops.py index feedcd27164..e52157ee38c 100644 --- a/xarray/tests/test_duck_array_ops.py +++ b/xarray/tests/test_duck_array_ops.py @@ -602,7 +602,7 @@ def test_docs(): skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or skipna=True has not been implemented (object, datetime64 or timedelta64). - min_count : int, default None + min_count : int, default: None The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. New in version 0.10.8: Added with the default being None. 
From a2fd26816abb9eecdf82a6e3d692f435133cb500 Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 12 Aug 2020 00:47:56 +0200 Subject: [PATCH 33/50] add a missing end quote --- xarray/core/concat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xarray/core/concat.py b/xarray/core/concat.py index b389e2a85e8..fa3fac92277 100644 --- a/xarray/core/concat.py +++ b/xarray/core/concat.py @@ -94,7 +94,7 @@ def concat( those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. combine_attrs : {"drop", "identical", "no_conflicts", "override"}, \ - default: "override + default: "override" String indicating how to combine attrs of the objects being merged: - "drop": empty attrs on returned Dataset. From 13e0f3d451ff478ce9fab58bac2cf1eb0eaa07af Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 12 Aug 2020 14:49:04 +0200 Subject: [PATCH 34/50] don't use nested parameter definition lists --- xarray/core/common.py | 20 ++++---------------- xarray/core/rolling.py | 36 ++++++++---------------------------- xarray/core/rolling_exp.py | 10 ++-------- 3 files changed, 14 insertions(+), 52 deletions(-) diff --git a/xarray/core/common.py b/xarray/core/common.py index 656ccbe5031..8ef34ca3b73 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -881,15 +881,9 @@ def rolling_exp( Parameters ---------- - window : {dim: window_size}, optional - A single mapping from a dimension name to window value. - - dim : str - Name of the dimension to create the rolling exponential window - along (e.g., `time`). - window : int - Size of the moving window. The type of this is specified in - `window_type` + window : mapping of hashable to int, optional + A mapping from the name of the dimension to create the rolling + exponential window along (e.g. `time`) to the size of the moving window. window_type : {"span", "com", "halflife", "alpha"}, default: "span" The format of the previously supplied window. 
Each is a simple numerical transformation of the others. Described in detail: @@ -920,14 +914,8 @@ def coarsen( Parameters ---------- - dim: dict, optional + dim : mapping of hashable to int, optional Mapping from the dimension name to the window size. - - dim : str - Name of the dimension to create the rolling iterator - along (e.g., `time`). - window : int - Size of the moving window. boundary : {"exact", "trim", "pad"}, default: "exact" If 'exact', a ValueError will be raised if dimension size is not a multiple of the window size. If 'trim', the excess entries are diff --git a/xarray/core/rolling.py b/xarray/core/rolling.py index 4d51e63f831..a595f9db266 100644 --- a/xarray/core/rolling.py +++ b/xarray/core/rolling.py @@ -55,13 +55,8 @@ def __init__(self, obj, windows, min_periods=None, center=False, keep_attrs=None obj : Dataset or DataArray Object to window. windows : mapping of hashable to int - A mapping from a dimension name to window size - - dim : str - Name of the dimension to create the rolling iterator - along (e.g., `time`). - window : int - Size of the moving window. + A mapping from the name of the dimension to create the rolling + exponential window along (e.g. `time`) to the size of the moving window. min_periods : int, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to @@ -177,13 +172,8 @@ def __init__(self, obj, windows, min_periods=None, center=False, keep_attrs=None obj : DataArray Object to window. windows : mapping of hashable to int - A mapping from a dimension name to window size - - dim : str - Name of the dimension to create the rolling iterator - along (e.g., `time`). - window : int - Size of the moving window. + A mapping from the name of the dimension to create the rolling + exponential window along (e.g. `time`) to the size of the moving window. 
min_periods : int, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to @@ -453,13 +443,8 @@ def __init__(self, obj, windows, min_periods=None, center=False, keep_attrs=None obj : Dataset Object to window. windows : mapping of hashable to int - A mapping from a dimension name to window size - - dim : str - Name of the dimension to create the rolling iterator - along (e.g., `time`). - window : int - Size of the moving window. + A mapping from the name of the dimension to create the rolling + exponential window along (e.g. `time`) to the size of the moving window. min_periods : int, default: None Minimum number of observations in window required to have a value (otherwise result is NA). The default, None, is equivalent to @@ -642,13 +627,8 @@ def __init__(self, obj, windows, boundary, side, coord_func, keep_attrs): obj : Dataset or DataArray Object to window. windows : mapping of hashable to int - A mapping from a dimension name to window size - - dim : str - Name of the dimension to create the rolling iterator - along (e.g., `time`). - window : int - Size of the moving window. + A mapping from the name of the dimension to create the rolling + exponential window along (e.g. `time`) to the size of the moving window. boundary : 'exact' | 'trim' | 'pad' If 'exact', a ValueError will be raised if dimension size is not a multiple of window size. If 'trim', the excess indexes are trimed. diff --git a/xarray/core/rolling_exp.py b/xarray/core/rolling_exp.py index 41c60fe57cd..c2646846e43 100644 --- a/xarray/core/rolling_exp.py +++ b/xarray/core/rolling_exp.py @@ -66,14 +66,8 @@ class RollingExp: obj : Dataset or DataArray Object to window. windows : mapping of hashable to int - A single mapping from a single dimension name to window value - - dim : str - Name of the dimension to create the rolling exponential window - along (e.g., `time`). - window : int - Size of the moving window. 
The type of this is specified in - `window_type` + A mapping from the name of the dimension to create the rolling + exponential window along (e.g. `time`) to the size of the moving window. window_type : {"span", "com", "halflife", "alpha"}, default: "span" The format of the previously supplied window. Each is a simple numerical transformation of the others. Described in detail: From 9b08bbf06c6c5dee322df3a9b5081315a6c1298a Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 12 Aug 2020 15:07:40 +0200 Subject: [PATCH 35/50] update some outdated links to the pandas docs --- doc/weather-climate.rst | 4 ++-- xarray/coding/cftime_offsets.py | 2 +- xarray/core/common.py | 2 +- xarray/core/rolling_exp.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/weather-climate.rst b/doc/weather-climate.rst index 4ed11d34d7a..47bb57c551d 100644 --- a/doc/weather-climate.rst +++ b/doc/weather-climate.rst @@ -195,6 +195,6 @@ For data indexed by a :py:class:`~xarray.CFTimeIndex` xarray currently supports: and silent errors due to the difference in calendar types between the dates encoded in your data and the dates stored in memory. -.. _Timestamp-valid range: https://pandas.pydata.org/pandas-docs/stable/timeseries.html#timestamp-limitations +.. _Timestamp-valid range: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timestamp-limitations .. _ISO 8601-format: https://en.wikipedia.org/wiki/ISO_8601 -.. _partial datetime string indexing: https://pandas.pydata.org/pandas-docs/stable/timeseries.html#partial-string-indexing +.. 
_partial datetime string indexing: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#partial-string-indexing diff --git a/xarray/coding/cftime_offsets.py b/xarray/coding/cftime_offsets.py index a1521b27ad2..6dfdb408bb5 100644 --- a/xarray/coding/cftime_offsets.py +++ b/xarray/coding/cftime_offsets.py @@ -946,7 +946,7 @@ def cftime_range( As in the standard pandas function, three of the ``start``, ``end``, ``periods``, or ``freq`` arguments must be specified at a given time, with the other set to ``None``. See the `pandas documentation - `_ + `_ for more examples of the behavior of ``date_range`` with each of the parameters. diff --git a/xarray/core/common.py b/xarray/core/common.py index 8ef34ca3b73..4207aea3a25 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -887,7 +887,7 @@ def rolling_exp( window_type : {"span", "com", "halflife", "alpha"}, default: "span" The format of the previously supplied window. Each is a simple numerical transformation of the others. Described in detail: - https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.ewm.html + https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.ewm.html **window_kwargs : optional The keyword arguments form of ``window``. One of window or window_kwargs must be provided. diff --git a/xarray/core/rolling_exp.py b/xarray/core/rolling_exp.py index c2646846e43..525867cc025 100644 --- a/xarray/core/rolling_exp.py +++ b/xarray/core/rolling_exp.py @@ -71,7 +71,7 @@ class RollingExp: window_type : {"span", "com", "halflife", "alpha"}, default: "span" The format of the previously supplied window. Each is a simple numerical transformation of the others. 
Described in detail: - https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.ewm.html + https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.ewm.html Returns ------- From 2b2656180fdc3a773e85a3412c363e0068167521 Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 12 Aug 2020 15:08:44 +0200 Subject: [PATCH 36/50] update some more docstrings --- xarray/conventions.py | 22 +++++++++++----------- xarray/core/accessor_dt.py | 11 ++++++----- xarray/core/dataarray.py | 37 +++++++++++++++++++------------------ 3 files changed, 36 insertions(+), 34 deletions(-) diff --git a/xarray/conventions.py b/xarray/conventions.py index cac884eb72c..da5ad7eea85 100644 --- a/xarray/conventions.py +++ b/xarray/conventions.py @@ -284,13 +284,13 @@ def decode_cf_variable( A variable holding potentially CF encoded information. concat_characters : bool Should character arrays be concatenated to strings, for - example: ['h', 'e', 'l', 'l', 'o'] -> 'hello' + example: ["h", "e", "l", "l", "o"] -> "hello" mask_and_scale : bool Lazily scale (using scale_factor and add_offset) and mask (using _FillValue). If the _Unsigned attribute is present treat integer arrays as unsigned. decode_times : bool - Decode cf times ('hours since 2000-01-01') to np.datetime64. + Decode cf times ("hours since 2000-01-01") to np.datetime64. decode_endianness : bool Decode arrays from non-native to native endianness. stack_char_dim : bool @@ -299,7 +299,7 @@ def decode_cf_variable( dataset to figure out if this is appropriate. use_cftime : bool, optional Only relevant if encoded dates come from a standard calendar - (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not + (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to @@ -533,23 +533,23 @@ def decode_cf( Object to decode. 
concat_characters : bool, optional Should character arrays be concatenated to strings, for - example: ['h', 'e', 'l', 'l', 'o'] -> 'hello' + example: ["h", "e", "l", "l", "o"] -> "hello" mask_and_scale : bool, optional Lazily scale (using scale_factor and add_offset) and mask (using _FillValue). decode_times : bool, optional - Decode cf times (e.g., integers since 'hours since 2000-01-01') to + Decode cf times (e.g., integers since "hours since 2000-01-01") to np.datetime64. decode_coords : bool, optional Use the 'coordinates' attribute on variable (or the dataset itself) to identify coordinates. - drop_variables: str or iterable, optional + drop_variables : str or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. - use_cftime: bool, optional + use_cftime : bool, optional Only relevant if encoded dates come from a standard calendar - (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not + (e.g. "gregorian", "proleptic_gregorian", "standard", or not specified). If None (default), attempt to decode times to ``np.datetime64[ns]`` objects; if this is not possible, decode times to ``cftime.datetime`` objects. If True, always decode times to @@ -559,7 +559,7 @@ def decode_cf( raise an error. decode_timedelta : bool, optional If True, decode variables and coordinates with time units in - {'days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'} + {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"} into timedelta objects. If False, leave them encoded as numbers. If None (default), assume the same value of decode_time. 
@@ -621,12 +621,12 @@ def cf_decoder( A dictionary mapping from attribute name to value concat_characters : bool Should character arrays be concatenated to strings, for - example: ['h', 'e', 'l', 'l', 'o'] -> 'hello' + example: ["h", "e", "l", "l", "o"] -> "hello" mask_and_scale: bool Lazily scale (using scale_factor and add_offset) and mask (using _FillValue). decode_times : bool - Decode cf times ('hours since 2000-01-01') to np.datetime64. + Decode cf times ("hours since 2000-01-01") to np.datetime64. Returns ------- diff --git a/xarray/core/accessor_dt.py b/xarray/core/accessor_dt.py index e686dbc503f..a84da37986e 100644 --- a/xarray/core/accessor_dt.py +++ b/xarray/core/accessor_dt.py @@ -104,9 +104,10 @@ def _round_field(values, name, freq): ---------- values : np.ndarray or dask.array-like Array-like container of datetime-like values - name : str (ceil, floor, round) + name : {"ceil", "floor", "round"} Name of rounding function - freq : a freq string indicating the rounding resolution + freq : str + a freq string indicating the rounding resolution Returns ------- @@ -191,7 +192,7 @@ def floor(self, freq): Parameters ---------- freq : str - a freq string indicating the rounding resolution e.g. 'D' for daily resolution + a freq string indicating the rounding resolution e.g. "D" for daily resolution Returns ------- @@ -208,7 +209,7 @@ def ceil(self, freq): Parameters ---------- freq : str - a freq string indicating the rounding resolution e.g. 'D' for daily resolution + a freq string indicating the rounding resolution e.g. "D" for daily resolution Returns ------- @@ -224,7 +225,7 @@ def round(self, freq): Parameters ---------- freq : str - a freq string indicating the rounding resolution e.g. 'D' for daily resolution + a freq string indicating the rounding resolution e.g. 
"D" for daily resolution Returns ------- diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 1a4bf2b3007..6a16956384b 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -1415,8 +1415,8 @@ def interp( method : str, default: "linear" The method used to interpolate. Choose from - - {'linear', 'nearest'} for multidimensional array, - - {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. + - {"linear", "nearest"} for multidimensional array, + - {"linear", "nearest", "zero", "slinear", "quadratic", "cubic"} for 1-dimensional array. assume_sorted : bool, optional If False, values of x can be in any order and they are sorted first. If True, x has to be an array of monotonically increasing @@ -1485,8 +1485,8 @@ def interp_like( method : str, default: "linear" The method used to interpolate. Choose from - - {'linear', 'nearest'} for multidimensional array, - - {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. + - {"linear", "nearest"} for multidimensional array, + - {"linear", "nearest", "zero", "slinear", "quadratic", "cubic"} for 1-dimensional array. assume_sorted : bool, optional If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated @@ -2490,22 +2490,23 @@ def from_dict(cls, d: dict) -> "DataArray": Input dict can take several forms:: - d = {'dims': ('t'), 'data': x} + >>> d = {"dims": ("t"), "data": x} - d = {'coords': {'t': {'dims': 't', 'data': t, - 'attrs': {'units':'s'}}}, - 'attrs': {'title': 'air temperature'}, - 'dims': 't', - 'data': x, - 'name': 'a'} + >>> d = { + ... "coords": {"t": {"dims": "t", "data": t, "attrs": {"units": "s"}}}, + ... "attrs": {"title": "air temperature"}, + ... "dims": "t", + ... "data": x, + ... "name": "a", + ... 
} - where 't' is the name of the dimesion, 'a' is the name of the array, + where "t" is the name of the dimesion, "a" is the name of the array, and x and t are lists, numpy.arrays, or pandas objects. Parameters ---------- d : dict - Mapping with a minimum structure of {'dims': [...], 'data': [...]} + Mapping with a minimum structure of {"dims": [...], "data": [...]} Returns ------- @@ -3009,7 +3010,7 @@ def sortby( Returns ------- - sorted: DataArray + sorted : DataArray A new dataarray where all the specified dims are sorted by dim labels. @@ -3183,12 +3184,12 @@ def differentiate( Parameters ---------- - coord: hashable + coord : hashable The coordinate to be used to compute the gradient. - edge_order: {1, 2}, default: 1 + edge_order : {1, 2}, default: 1 N-th order accurate differences at the boundaries. - datetime_unit: None or {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', \ - 'us', 'ns', 'ps', 'fs', 'as'} + datetime_unit : None or {"Y", "M", "W", "D", "h", "m", "s", "ms", \ + "us", "ns", "ps", "fs", "as"} Unit to compute gradient. Only valid for datetime coordinate. Returns From 28ae240211005f83bc3fd4afe72956a92f7a3ec9 Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 12 Aug 2020 15:20:08 +0200 Subject: [PATCH 37/50] mark a parameter as optional --- xarray/core/dataarray.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 6a16956384b..c68a8e004d3 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -3188,8 +3188,8 @@ def differentiate( The coordinate to be used to compute the gradient. edge_order : {1, 2}, default: 1 N-th order accurate differences at the boundaries. - datetime_unit : None or {"Y", "M", "W", "D", "h", "m", "s", "ms", \ - "us", "ns", "ps", "fs", "as"} + datetime_unit : {"Y", "M", "W", "D", "h", "m", "s", "ms", \ + "us", "ns", "ps", "fs", "as"} or None, optional Unit to compute gradient. Only valid for datetime coordinate. 
Returns From 836cf80c6016bc0a3c5c1c5d668cadd693585fb9 Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 12 Aug 2020 15:25:41 +0200 Subject: [PATCH 38/50] more docstrings --- xarray/core/dataset.py | 61 +++++++++++++++++++++++------------------- 1 file changed, 33 insertions(+), 28 deletions(-) diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 95910a85156..77f9dc6be77 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -1529,12 +1529,12 @@ def to_netcdf( encoding : dict, optional Nested dictionary with variable names as keys and dictionaries of variable specific encodings as values, e.g., - ``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1, - 'zlib': True}, ...}`` + ``{"my_variable": {"dtype": "int16", "scale_factor": 0.1, + "zlib": True}, ...}`` The `h5netcdf` engine supports both the NetCDF4-style compression - encoding parameters ``{'zlib': True, 'complevel': 9}`` and the h5py - ones ``{'compression': 'gzip', 'compression_opts': 9}``. + encoding parameters ``{"zlib": True, "complevel": 9}`` and the h5py + ones ``{"compression": "gzip", "compression_opts": 9}``. This allows using any compression plugin installed in the HDF5 library, e.g. LZF. @@ -1542,14 +1542,14 @@ def to_netcdf( Dimension(s) that should be serialized as unlimited dimensions. By default, no dimensions are treated as unlimited dimensions. Note that unlimited_dims may also be set via - ``dataset.encoding['unlimited_dims']``. + ``dataset.encoding["unlimited_dims"]``. compute: bool, default: True If true compute immediately, otherwise return a ``dask.delayed.Delayed`` object that can be computed later. invalid_netcdf: bool, default: False - Only valid along with engine='h5netcdf'. If True, allow writing + Only valid along with ``engine="h5netcdf"``. If True, allow writing hdf5 files which are invalid netcdf as described in - https://github.com/shoyer/h5netcdf. Default: False. + https://github.com/shoyer/h5netcdf. 
""" if encoding is None: encoding = {} @@ -1744,7 +1744,7 @@ def chunk( ---------- chunks : int, 'auto' or mapping, optional Chunk sizes along each dimension, e.g., ``5`` or - ``{'x': 5, 'y': 5}``. + ``{"x": 5, "y": 5}``. name_prefix : str, optional Prefix for the name of any new dask arrays. token : str, optional @@ -2587,9 +2587,9 @@ def interp( If DataArrays are passed as new coordates, their dimensions are used for the broadcasting. method : str, optional - {'linear', 'nearest'} for multidimensional array, - {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} - for 1-dimensional array. 'linear' is used by default. + {"linear", "nearest"} for multidimensional array, + {"linear", "nearest", "zero", "slinear", "quadratic", "cubic"} + for 1-dimensional array. "linear" is used by default. assume_sorted : bool, optional If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated @@ -2715,8 +2715,8 @@ def interp_like( names to an 1d array-like, which provides coordinates upon which to index the variables in this dataset. method : str, optional - {'linear', 'nearest'} for multidimensional array, - {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} + {"linear", "nearest"} for multidimensional array, + {"linear", "nearest", "zero", "slinear", "quadratic", "cubic"} for 1-dimensional array. 'linear' is used by default. assume_sorted : bool, optional If False, values of coordinates that are interpolated over can be @@ -4113,7 +4113,7 @@ def interpolate_na( or None for no limit. This filling is done regardless of the size of the gap in the data. To only interpolate over gaps less than a given length, see ``max_gap``. - max_gap: int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta, default: None + max_gap : int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta, default: None Maximum size of gap, a continuous sequence of NaNs, that will be filled. 
Use None for no limit. When interpolating along a datetime64 dimension and ``use_coordinate=True``, ``max_gap`` can be one of the following: @@ -4797,25 +4797,30 @@ def from_dict(cls, d): Input dict can take several forms:: - d = {'t': {'dims': ('t'), 'data': t}, - 'a': {'dims': ('t'), 'data': x}, - 'b': {'dims': ('t'), 'data': y}} - - d = {'coords': {'t': {'dims': 't', 'data': t, - 'attrs': {'units':'s'}}}, - 'attrs': {'title': 'air temperature'}, - 'dims': 't', - 'data_vars': {'a': {'dims': 't', 'data': x, }, - 'b': {'dims': 't', 'data': y}}} + >>> d = { + ... "t": {"dims": ("t"), "data": t}, + ... "a": {"dims": ("t"), "data": x}, + ... "b": {"dims": ("t"), "data": y}, + ... } + + >>> d = { + ... "coords": {"t": {"dims": "t", "data": t, "attrs": {"units": "s"}}}, + ... "attrs": {"title": "air temperature"}, + ... "dims": "t", + ... "data_vars": { + ... "a": {"dims": "t", "data": x,}, + ... "b": {"dims": "t", "data": y}, + ... }, + ... } - where 't' is the name of the dimesion, 'a' and 'b' are names of data + where "t" is the name of the dimesion, "a" and "b" are names of data variables and t, x, and y are lists, numpy.arrays or pandas objects. Parameters ---------- d : dict-like Mapping with a minimum structure of - ``{'var_0': {'dims': [..], 'data': [..]}, \ + ``{"var_0": {"dims": [..], "data": [..]}, \ ...}`` Returns @@ -5447,8 +5452,8 @@ def differentiate(self, coord, edge_order=1, datetime_unit=None): The coordinate to be used to compute the gradient. edge_order : {1, 2}, default: 1 N-th order accurate differences at the boundaries. - datetime_unit : None or {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', \ - 'us', 'ns', 'ps', 'fs', 'as'}, default: None + datetime_unit : None or {"Y", "M", "W", "D", "h", "m", "s", "ms", \ + "us", "ns", "ps", "fs", "as"}, default: None Unit to compute gradient. Only valid for datetime coordinate. 
Returns From 939793cfe5edb4d14345d0ccbe5277348afbc500 Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 12 Aug 2020 15:56:25 +0200 Subject: [PATCH 39/50] use code blocks instead of literal blocks with doctest lines --- xarray/core/dataarray.py | 20 +++++++++++--------- xarray/core/dataset.py | 36 +++++++++++++++++++----------------- 2 files changed, 30 insertions(+), 26 deletions(-) diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index c68a8e004d3..16ce642912f 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -2488,17 +2488,19 @@ def from_dict(cls, d: dict) -> "DataArray": """ Convert a dictionary into an xarray.DataArray - Input dict can take several forms:: + Input dict can take several forms: - >>> d = {"dims": ("t"), "data": x} + .. code:: python - >>> d = { - ... "coords": {"t": {"dims": "t", "data": t, "attrs": {"units": "s"}}}, - ... "attrs": {"title": "air temperature"}, - ... "dims": "t", - ... "data": x, - ... "name": "a", - ... } + d = {"dims": ("t"), "data": x} + + d = { + "coords": {"t": {"dims": "t", "data": t, "attrs": {"units": "s"}}}, + "attrs": {"title": "air temperature"}, + "dims": "t", + "data": x, + "name": "a", + } where "t" is the name of the dimesion, "a" is the name of the array, and x and t are lists, numpy.arrays, or pandas objects. diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 77f9dc6be77..8c0aea8b983 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -4795,23 +4795,25 @@ def from_dict(cls, d): """ Convert a dictionary into an xarray.Dataset. - Input dict can take several forms:: - - >>> d = { - ... "t": {"dims": ("t"), "data": t}, - ... "a": {"dims": ("t"), "data": x}, - ... "b": {"dims": ("t"), "data": y}, - ... } - - >>> d = { - ... "coords": {"t": {"dims": "t", "data": t, "attrs": {"units": "s"}}}, - ... "attrs": {"title": "air temperature"}, - ... "dims": "t", - ... "data_vars": { - ... "a": {"dims": "t", "data": x,}, - ... 
"b": {"dims": "t", "data": y}, - ... }, - ... } + Input dict can take several forms + + .. code:: python + + d = { + "t": {"dims": ("t"), "data": t}, + "a": {"dims": ("t"), "data": x}, + "b": {"dims": ("t"), "data": y}, + } + + d = { + "coords": {"t": {"dims": "t", "data": t, "attrs": {"units": "s"}}}, + "attrs": {"title": "air temperature"}, + "dims": "t", + "data_vars": { + "a": {"dims": "t", "data": x,}, + "b": {"dims": "t", "data": y}, + }, + } where "t" is the name of the dimesion, "a" and "b" are names of data variables and t, x, and y are lists, numpy.arrays or pandas objects. From a0230b614432a84bb3c05f6d56054b1215cafa69 Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 12 Aug 2020 20:14:26 +0200 Subject: [PATCH 40/50] add back a removed colon --- xarray/core/dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 8c0aea8b983..3af093b13da 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -4795,7 +4795,7 @@ def from_dict(cls, d): """ Convert a dictionary into an xarray.Dataset. - Input dict can take several forms + Input dict can take several forms: .. code:: python From e000be12b74f635da0b54576b7c12ae8b6cc786b Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 12 Aug 2020 21:27:01 +0200 Subject: [PATCH 41/50] convert the glossary page to a sphinx glossary --- doc/terminology.rst | 129 +++++++++++++++++++++++++++++++------------- 1 file changed, 92 insertions(+), 37 deletions(-) diff --git a/doc/terminology.rst b/doc/terminology.rst index ab6d856920a..90c6c3ceeb6 100644 --- a/doc/terminology.rst +++ b/doc/terminology.rst @@ -4,40 +4,95 @@ Terminology =========== -*Xarray terminology differs slightly from CF, mathematical conventions, and pandas; and therefore using xarray, understanding the documentation, and parsing error messages is easier once key terminology is defined. This glossary was designed so that more fundamental concepts come first. 
Thus for new users, this page is best read top-to-bottom. Throughout the glossary,* ``arr`` *will refer to an xarray* :py:class:`DataArray` *in any small examples. For more complete examples, please consult the relevant documentation.* - ----- - -**DataArray:** A multi-dimensional array with labeled or named dimensions. ``DataArray`` objects add metadata such as dimension names, coordinates, and attributes (defined below) to underlying "unlabeled" data structures such as numpy and Dask arrays. If its optional ``name`` property is set, it is a *named DataArray*. - ----- - -**Dataset:** A dict-like collection of ``DataArray`` objects with aligned dimensions. Thus, most operations that can be performed on the dimensions of a single ``DataArray`` can be performed on a dataset. Datasets have data variables (see **Variable** below), dimensions, coordinates, and attributes. - ----- - -**Variable:** A `NetCDF-like variable `_ consisting of dimensions, data, and attributes which describe a single array. The main functional difference between variables and numpy arrays is that numerical operations on variables implement array broadcasting by dimension name. Each ``DataArray`` has an underlying variable that can be accessed via ``arr.variable``. However, a variable is not fully described outside of either a ``Dataset`` or a ``DataArray``. - -.. note:: - - The :py:class:`Variable` class is low-level interface and can typically be ignored. However, the word "variable" appears often enough in the code and documentation that is useful to understand. - ----- - -**Dimension:** In mathematics, the *dimension* of data is loosely the number of degrees of freedom for it. A *dimension axis* is a set of all points in which all but one of these degrees of freedom is fixed. We can think of each dimension axis as having a name, for example the "x dimension". 
In xarray, a ``DataArray`` object's *dimensions* are its named dimension axes, and the name of the ``i``-th dimension is ``arr.dims[i]``. If an array is created without dimensions, the default dimension names are ``dim_0``, ``dim_1``, and so forth. - ----- - -**Coordinate:** An array that labels a dimension or set of dimensions of another ``DataArray``. In the usual one-dimensional case, the coordinate array's values can loosely be thought of as tick labels along a dimension. There are two types of coordinate arrays: *dimension coordinates* and *non-dimension coordinates* (see below). A coordinate named ``x`` can be retrieved from ``arr.coords[x]``. A ``DataArray`` can have more coordinates than dimensions because a single dimension can be labeled by multiple coordinate arrays. However, only one coordinate array can be a assigned as a particular dimension's dimension coordinate array. As a consequence, ``len(arr.dims) <= len(arr.coords)`` in general. - ----- - -**Dimension coordinate:** A one-dimensional coordinate array assigned to ``arr`` with both a name and dimension name in ``arr.dims``. Dimension coordinates are used for label-based indexing and alignment, like the index found on a :py:class:`pandas.DataFrame` or :py:class:`pandas.Series`. In fact, dimension coordinates use :py:class:`pandas.Index` objects under the hood for efficient computation. Dimension coordinates are marked by ``*`` when printing a ``DataArray`` or ``Dataset``. - ----- - -**Non-dimension coordinate:** A coordinate array assigned to ``arr`` with a name in ``arr.coords`` but *not* in ``arr.dims``. These coordinates arrays can be one-dimensional or multidimensional, and they are useful for auxiliary labeling. As an example, multidimensional coordinates are often used in geoscience datasets when :doc:`the data's physical coordinates (such as latitude and longitude) differ from their logical coordinates `. 
However, non-dimension coordinates are not indexed, and any operation on non-dimension coordinates that leverages indexing will fail. Printing ``arr.coords`` will print all of ``arr``'s coordinate names, with the corresponding dimension(s) in parentheses. For example, ``coord_name (dim_name) 1 2 3 ...``. - ----- - -**Index:** An *index* is a data structure optimized for efficient selecting and slicing of an associated array. Xarray creates indexes for dimension coordinates so that operations along dimensions are fast, while non-dimension coordinates are not indexed. Under the hood, indexes are implemented as :py:class:`pandas.Index` objects. The index associated with dimension name ``x`` can be retrieved by ``arr.indexes[x]``. By construction, ``len(arr.dims) == len(arr.indexes)`` +*Xarray terminology differs slightly from CF, mathematical conventions, and +pandas; and therefore using xarray, understanding the documentation, and +parsing error messages is easier once key terminology is defined. This glossary +was designed so that more fundamental concepts come first. Thus for new users, +this page is best read top-to-bottom. Throughout the glossary,* ``arr`` *will +refer to an xarray* :py:class:`DataArray` *in any small examples. For more +complete examples, please consult the relevant documentation.* + +.. glossary:: + + DataArray + A multi-dimensional array with labeled or named + dimensions. ``DataArray`` objects add metadata such as dimension names, + coordinates, and attributes (defined below) to underlying "unlabeled" + data structures such as numpy and Dask arrays. If its optional ``name`` + property is set, it is a *named DataArray*. + + Dataset + A dict-like collection of ``DataArray`` objects with aligned + dimensions. Thus, most operations that can be performed on the + dimensions of a single ``DataArray`` can be performed on a + dataset. Datasets have data variables (see **Variable** below), + dimensions, coordinates, and attributes. 
+ + Variable + A `NetCDF-like variable + `_ + consisting of dimensions, data, and attributes which describe a single + array. The main functional difference between variables and numpy arrays + is that numerical operations on variables implement array broadcasting + by dimension name. Each ``DataArray`` has an underlying variable that + can be accessed via ``arr.variable``. However, a variable is not fully + described outside of either a ``Dataset`` or a ``DataArray``. + + .. note:: + + The :py:class:`Variable` class is low-level interface and can + typically be ignored. However, the word "variable" appears often + enough in the code and documentation that is useful to understand. + + Dimension + In mathematics, the *dimension* of data is loosely the number of degrees + of freedom for it. A *dimension axis* is a set of all points in which + all but one of these degrees of freedom is fixed. We can think of each + dimension axis as having a name, for example the "x dimension". In + xarray, a ``DataArray`` object's *dimensions* are its named dimension + axes, and the name of the ``i``-th dimension is ``arr.dims[i]``. If an + array is created without dimensions, the default dimension names are + ``dim_0``, ``dim_1``, and so forth. + + Coordinate + An array that labels a dimension or set of dimensions of another + ``DataArray``. In the usual one-dimensional case, the coordinate array's + values can loosely be thought of as tick labels along a dimension. There + are two types of coordinate arrays: *dimension coordinates* and + *non-dimension coordinates* (see below). A coordinate named ``x`` can be + retrieved from ``arr.coords[x]``. A ``DataArray`` can have more + coordinates than dimensions because a single dimension can be labeled by + multiple coordinate arrays. However, only one coordinate array can be a + assigned as a particular dimension's dimension coordinate array. As a + consequence, ``len(arr.dims) <= len(arr.coords)`` in general. 
+ + Dimension coordinate + A one-dimensional coordinate array assigned to ``arr`` with both a name + and dimension name in ``arr.dims``. Dimension coordinates are used for + label-based indexing and alignment, like the index found on a + :py:class:`pandas.DataFrame` or :py:class:`pandas.Series`. In fact, + dimension coordinates use :py:class:`pandas.Index` objects under the + hood for efficient computation. Dimension coordinates are marked by + ``*`` when printing a ``DataArray`` or ``Dataset``. + + Non-dimension coordinate + A coordinate array assigned to ``arr`` with a name in ``arr.coords`` but + *not* in ``arr.dims``. These coordinates arrays can be one-dimensional + or multidimensional, and they are useful for auxiliary labeling. As an + example, multidimensional coordinates are often used in geoscience + datasets when :doc:`the data's physical coordinates (such as latitude + and longitude) differ from their logical coordinates + `. However, non-dimension coordinates + are not indexed, and any operation on non-dimension coordinates that + leverages indexing will fail. Printing ``arr.coords`` will print all of + ``arr``'s coordinate names, with the corresponding dimension(s) in + parentheses. For example, ``coord_name (dim_name) 1 2 3 ...``. + + Index + An *index* is a data structure optimized for efficient selecting and + slicing of an associated array. Xarray creates indexes for dimension + coordinates so that operations along dimensions are fast, while + non-dimension coordinates are not indexed. Under the hood, indexes are + implemented as :py:class:`pandas.Index` objects. The index associated + with dimension name ``x`` can be retrieved by ``arr.indexes[x]``. 
By + construction, ``len(arr.dims) == len(arr.indexes)`` From f3546fcd130203318dec044e71a7a124c00d0c87 Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 12 Aug 2020 21:27:40 +0200 Subject: [PATCH 42/50] add term descriptions for names and scalars --- doc/terminology.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/terminology.rst b/doc/terminology.rst index 90c6c3ceeb6..a04d16d41e6 100644 --- a/doc/terminology.rst +++ b/doc/terminology.rst @@ -96,3 +96,13 @@ complete examples, please consult the relevant documentation.* implemented as :py:class:`pandas.Index` objects. The index associated with dimension name ``x`` can be retrieved by ``arr.indexes[x]``. By construction, ``len(arr.dims) == len(arr.indexes)`` + + name + The names of dimensions, coordinates, DataArray objects and data + variables can be anything as long as they are :term:`hashable`. However, + it is preferred to use :py:class:`str` typed names. + + scalar + By definition, a scalar is not :term:`array_like`. That means that, + e.g., :py:class:`int`, :py:class:`float`, and :py:class:`str` values are + "scalar" while :py:class:`list` or :py:class:`tuple` are not. 
From 74086d7a7e76dcc7101882689f1a95047f5aae3e Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 12 Aug 2020 21:28:34 +0200 Subject: [PATCH 43/50] preprocess types but don't use :param: because that will separate combined parameter docs by duplicating the description --- doc/conf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/conf.py b/doc/conf.py index dbc0af90193..f5075a2f5c1 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -106,8 +106,9 @@ autosummary_generate = True autodoc_typehints = "none" -napoleon_use_param = True +napoleon_use_param = False napoleon_use_rtype = True +napoleon_preprocess_types = True napoleon_type_aliases = { # general terms "sequence": ":term:`sequence`", From 9a744958933784e58a8c39f1649234fba75679ce Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 12 Aug 2020 21:36:04 +0200 Subject: [PATCH 44/50] link to the terms --- doc/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index f5075a2f5c1..2d50d34bb3f 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -112,7 +112,6 @@ napoleon_type_aliases = { # general terms "sequence": ":term:`sequence`", - "hashable": ":term:`hashable`", "iterable": ":term:`iterable`", "callable": ":py:func:`callable`", "dict_like": ":term:`dict-like `", @@ -130,8 +129,9 @@ # numpy terms "array_like": ":term:`array_like`", "array-like": ":term:`array-like `", - # "scalar": ":term:`scalar`", + "scalar": ":term:`scalar`", "array": ":term:`array`", + "hashable": ":term:`hashable `", # matplotlib terms "color-like": ":py:func:`color-like `", "matplotlib colormap name": ":doc:matplotlib colormap name ", From f1731ec9fa18dada5a9750952ef9d46a3d6ca603 Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 12 Aug 2020 22:03:10 +0200 Subject: [PATCH 45/50] don't try to link to the repr method --- doc/whats-new.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/whats-new.rst b/doc/whats-new.rst index a5ec4560f24..2d72a69e918 100644 --- 
a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -28,7 +28,7 @@ New Features - :py:meth:`~xarray.DataArray.rolling` and :py:meth:`~xarray.Dataset.rolling` now accept more than 1 dimension.(:pull:`4219`) By `Keisuke Fujii `_. -- Build :py:meth:`CFTimeIndex.__repr__` explicitly as :py:class:`pandas.Index`. Add ``calendar`` as a new +- Build ``CFTimeIndex.__repr__`` explicitly as :py:class:`pandas.Index`. Add ``calendar`` as a new property for :py:class:`CFTimeIndex` and show ``calendar`` and ``length`` in ``CFTimeIndex.__repr__`` (:issue:`2416`, :pull:`4092`) By `Aaron Spring `_. From f299eec5abbc282ce118f23fed626bd39a9de6f7 Mon Sep 17 00:00:00 2001 From: Keewis Date: Thu, 13 Aug 2020 12:59:07 +0200 Subject: [PATCH 46/50] update the definition of scalar according to the review --- doc/terminology.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/terminology.rst b/doc/terminology.rst index a04d16d41e6..c83dbe85b51 100644 --- a/doc/terminology.rst +++ b/doc/terminology.rst @@ -103,6 +103,7 @@ complete examples, please consult the relevant documentation.* it is preferred to use :py:class:`str` typed names. scalar - By definition, a scalar is not :term:`array_like`. That means that, - e.g., :py:class:`int`, :py:class:`float`, and :py:class:`str` values are - "scalar" while :py:class:`list` or :py:class:`tuple` are not. + By definition, a scalar is not an :term:`array` and when converted to + one, it has 0 dimensions. That means that, e.g., :py:class:`int`, + :py:class:`float`, and :py:class:`str` objects are "scalar" while + :py:class:`list` or :py:class:`tuple` are not. 
From 0f6641bbfd0d3a3aa385a23228ea225c18b158f8 Mon Sep 17 00:00:00 2001 From: keewis Date: Fri, 14 Aug 2020 09:16:10 +0200 Subject: [PATCH 47/50] Update doc/terminology.rst Co-authored-by: Maximilian Roos <5635139+max-sixty@users.noreply.github.com> --- doc/terminology.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/terminology.rst b/doc/terminology.rst index c83dbe85b51..4b923d836dd 100644 --- a/doc/terminology.rst +++ b/doc/terminology.rst @@ -51,7 +51,7 @@ complete examples, please consult the relevant documentation.* dimension axis as having a name, for example the "x dimension". In xarray, a ``DataArray`` object's *dimensions* are its named dimension axes, and the name of the ``i``-th dimension is ``arr.dims[i]``. If an - array is created without dimensions, the default dimension names are + array is created without dimension names, the default dimension names are ``dim_0``, ``dim_1``, and so forth. Coordinate From 055b0bad4cd0eb0c827153311cb150e504a901f9 Mon Sep 17 00:00:00 2001 From: keewis Date: Fri, 14 Aug 2020 22:30:51 +0200 Subject: [PATCH 48/50] Update doc/terminology.rst Co-authored-by: Maximilian Roos <5635139+max-sixty@users.noreply.github.com> --- doc/terminology.rst | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/doc/terminology.rst b/doc/terminology.rst index 4b923d836dd..a85837bafbc 100644 --- a/doc/terminology.rst +++ b/doc/terminology.rst @@ -5,11 +5,8 @@ Terminology =========== *Xarray terminology differs slightly from CF, mathematical conventions, and -pandas; and therefore using xarray, understanding the documentation, and -parsing error messages is easier once key terminology is defined. This glossary -was designed so that more fundamental concepts come first. Thus for new users, -this page is best read top-to-bottom. Throughout the glossary,* ``arr`` *will -refer to an xarray* :py:class:`DataArray` *in any small examples. For more +pandas; so we've put together a glossary of its terms. 
Here,* ``arr`` * +refers to an xarray* :py:class:`DataArray` *in the examples. For more complete examples, please consult the relevant documentation.* .. glossary:: From bb5116716da9d139d21fb5d4e6b873baa531d54d Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 19 Aug 2020 11:56:07 +0200 Subject: [PATCH 49/50] fix a bad merge --- doc/weather-climate.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/weather-climate.rst b/doc/weather-climate.rst index 40b2d171d9d..cb2921e2ed1 100644 --- a/doc/weather-climate.rst +++ b/doc/weather-climate.rst @@ -207,5 +207,5 @@ For data indexed by a :py:class:`~xarray.CFTimeIndex` xarray currently supports: encoded in your data and the dates stored in memory. .. _Timestamp-valid range: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timestamp-limitations -.. _ISO 8601-format: https://en.wikipedia.org/wiki/ISO_8601 +.. _ISO 8601 standard: https://en.wikipedia.org/wiki/ISO_8601 .. _partial datetime string indexing: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#partial-string-indexing From 419dd0181eda1c0cbdb00d151b07a8f0408e50e7 Mon Sep 17 00:00:00 2001 From: Keewis Date: Wed, 19 Aug 2020 13:43:41 +0200 Subject: [PATCH 50/50] fix the docstring of cftime_range --- xarray/coding/cftime_offsets.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/xarray/coding/cftime_offsets.py b/xarray/coding/cftime_offsets.py index 6dfdb408bb5..4e77530dfdb 100644 --- a/xarray/coding/cftime_offsets.py +++ b/xarray/coding/cftime_offsets.py @@ -795,19 +795,19 @@ def cftime_range( Left bound for generating dates. end : str or cftime.datetime, optional Right bound for generating dates. - periods : integer, optional + periods : int, optional Number of periods to generate. - freq : str, default "D", BaseCFTimeOffset, or None + freq : str or None, default: "D" Frequency strings can have multiples, e.g. "5H". 
normalize : bool, default: False Normalize start/end dates to midnight before generating date range. name : str, default: None Name of the resulting index - closed : {None, "left", "right"}, optional + closed : {"left", "right"} or None, default: None Make the interval closed with respect to the given frequency to the - "left", "right", or both sides (None, the default). - calendar : str - Calendar type for the datetimes (default "standard"). + "left", "right", or both sides (None). + calendar : str, default: "standard" + Calendar type for the datetimes. Returns -------