diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
new file mode 100644
index 0000000000..8a0a42f747
--- /dev/null
+++ b/.github/workflows/benchmark.yml
@@ -0,0 +1,70 @@
+# Run the ASV benchmark suite against each pull request and fail if performance has degraded
+
+name: benchmark-check
+
+# Controls when the workflow will run
+on:
+  # Triggers the workflow on pull request events
+  pull_request:
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+  # This workflow contains a single job called "build"
+  build:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-latest
+
+    # Steps represent a sequence of tasks that will be executed as part of the job
+    steps:
+      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+
+      # This is too complicated to get working for now.
+      #
+      # # fetch the target branch from origin too (just the head)
+      # # and check it out into a local branch
+      # - name: Checkout PR and target branch
+      #   run: |
+      #     git fetch --no-tags --depth=1 origin ${{ github.base_ref }}
+      #     git fetch --no-tags --depth=1 origin ${{ github.head_ref }}
+
+      - name: Setup asv
+        run: |
+          pip install asv
+          cd benchmarks
+          asv machine --yes --machine gh-actions
+
+      - name: Run benchmarks on source and target
+        run: |
+          cd benchmarks
+          export BASE=origin/${{ github.base_ref }}
+          export HEAD=origin/${{ github.head_ref }}
+          # run asv benchmarks on the target
+          # -s 1 ensures only one "step" (the tip of the branch) is run
+          asv run $BASE -s 1
+          # run asv on the head of this PR
+          asv run $HEAD -s 1
+          asv compare -s $BASE $HEAD > .asv/compare.txt
+
+      - name: Archive asv results
+        uses: actions/upload-artifact@v2
+        with:
+          name: asv-report
+          path: |
+            benchmarks/.asv/results
+            benchmarks/.asv/compare.txt
+
+      - name: Fail if performance degraded
+        run: |
+          cd benchmarks
+          cat .asv/compare.txt
+          if grep -q "Benchmarks that have got worse" .asv/compare.txt; then
+            echo "::error::Performance degradation. See action artifact for full results."
+            exit 1
+          fi
+
diff --git a/asv.conf.json b/asv.conf.json
deleted file mode 100644
index 46cd4839f2..0000000000
--- a/asv.conf.json
+++ /dev/null
@@ -1,37 +0,0 @@
-// See https://asv.readthedocs.io/en/stable/asv.conf.json.html for
-// details on what can be included in this file.
-{ - "version": 1, - "project": "scitools-iris", - "project_url": "https://github.com/SciTools/iris", - "repo": ".", - "environment_type": "conda", - "show_commit_url": "http://github.com/scitools/iris/commit/", - "conda_channels": ["conda-forge", "defaults"], - "matrix": { - "cartopy": [], - "proj4": ["5"], - "cf-units": [], - "cftime": [], - "dask": [], - "matplotlib": [], - "netcdf4": [], - "numpy": [], - "scipy": [], - - "setuptools": [], - "pyke": [], - "six": [], - - "nose": [], - "pep8": [], - "filelock": [], - "imagehash": [], - "requests": [], - }, - - "benchmark_dir": "lib/iris/tests/benchmarking", - "env_dir": ".asv/env", - "results_dir": ".asv/results", - "html_dir": ".asv/html", -} diff --git a/benchmarks/asv.conf.json b/benchmarks/asv.conf.json new file mode 100644 index 0000000000..46ee5aa9e9 --- /dev/null +++ b/benchmarks/asv.conf.json @@ -0,0 +1,17 @@ +{ + "version": 1, + "project": "scitools-iris", + "project_url": "https://github.com/SciTools/iris", + "repo": "..", + "environment_type": "conda-lock", + "show_commit_url": "http://github.com/scitools/iris/commit/", + + "benchmark_dir": "./benchmarks", + "env_dir": ".asv/env", + "results_dir": ".asv/results", + "html_dir": ".asv/html", + "plugins": [".conda_lock_plugin"], + // this is not an asv standard config entry, just for our plugin + // path to lockfile, relative to project base + "conda_lockfile": "requirements/ci/nox.lock/py38-linux-64.lock" +} diff --git a/benchmarks/benchmarks/__init__.py b/benchmarks/benchmarks/__init__.py new file mode 100644 index 0000000000..0247578eae --- /dev/null +++ b/benchmarks/benchmarks/__init__.py @@ -0,0 +1,40 @@ +"""Common code for benchmarks.""" + +import os +from pathlib import Path + +# Environment variable names +_ASVDIR_VARNAME = 'ASV_DIR' # As set in nightly script "asv_nightly/asv.sh" +_DATADIR_VARNAME = 'BENCHMARK_DATA' # For local runs + +ARTIFICIAL_DIM_SIZE = int(10e3) # For all artificial cubes, coords etc. + +# Work out where the benchmark data dir is. +asv_dir = os.environ.get('ASV_DIR', None) +if asv_dir: + # For an overnight run, this comes from the 'ASV_DIR' setting. + benchmark_data_dir = Path(asv_dir) / 'data' +else: + # For a local run, you set 'BENCHMARK_DATA'. + benchmark_data_dir = os.environ.get(_DATADIR_VARNAME, None) + if benchmark_data_dir is not None: + benchmark_data_dir = Path(benchmark_data_dir) + + +def testdata_path(*path_names): + """ + Return the path of a benchmark test data file. + + These are based from a test-data location dir, which is either + ${}/data (for overnight tests), or ${} for local testing. + + If neither of these were set, an error is raised. + + """.format(_ASVDIR_VARNAME, _DATADIR_VARNAME) + if benchmark_data_dir is None: + msg = ('Benchmark data dir is not defined : ' + 'Either "${}" or "${}" must be set.') + raise(ValueError(msg.format(_ASVDIR_VARNAME, _DATADIR_VARNAME))) + path = benchmark_data_dir.joinpath(*path_names) + path = str(path) # Because Iris doesn't understand Path objects yet. + return path \ No newline at end of file diff --git a/benchmarks/benchmarks/aux_factory.py b/benchmarks/benchmarks/aux_factory.py new file mode 100644 index 0000000000..b9cf540377 --- /dev/null +++ b/benchmarks/benchmarks/aux_factory.py @@ -0,0 +1,53 @@ +""" +AuxFactory benchmark tests. 
+ +""" + +import numpy as np + +from benchmarks import ARTIFICIAL_DIM_SIZE +from iris import aux_factory, coords + + +class FactoryCommon: + # TODO: once https://github.com/airspeed-velocity/asv/pull/828 is released: + # * make class an ABC + # * remove NotImplementedError + # * combine setup_common into setup + + """ + A base class running a generalised suite of benchmarks for any factory. + Factory to be specified in a subclass. + + ASV will run the benchmarks within this class for any subclasses. + + Should only be instantiated within subclasses, but cannot enforce this + since ASV cannot handle classes that include abstract methods. + """ + def setup(self): + """Prevent ASV instantiating (must therefore override setup() in any subclasses.)""" + raise NotImplementedError + + def setup_common(self): + """Shared setup code that can be called by subclasses.""" + self.factory = self.create() + + def time_create(self): + """Create an instance of the benchmarked factory. create method is + specified in the subclass.""" + self.create() + + def time_return(self): + """Return an instance of the benchmarked factory.""" + self.factory + + +class HybridHeightFactory(FactoryCommon): + def setup(self): + data_1d = np.zeros(ARTIFICIAL_DIM_SIZE) + self.coord = coords.AuxCoord(points=data_1d, units="m") + + self.setup_common() + + def create(self): + return aux_factory.HybridHeightFactory(delta=self.coord) diff --git a/benchmarks/benchmarks/coords.py b/benchmarks/benchmarks/coords.py new file mode 100644 index 0000000000..59b70d9d2c --- /dev/null +++ b/benchmarks/benchmarks/coords.py @@ -0,0 +1,113 @@ +""" +Coord benchmark tests. + +""" + +import numpy as np + +from benchmarks import ARTIFICIAL_DIM_SIZE +from iris import coords + + +def setup(): + """General variables needed by multiple benchmark classes.""" + global data_1d + + data_1d = np.zeros(ARTIFICIAL_DIM_SIZE) + + +class CoordCommon: + # TODO: once https://github.com/airspeed-velocity/asv/pull/828 is released: + # * make class an ABC + # * remove NotImplementedError + # * combine setup_common into setup + """ + + A base class running a generalised suite of benchmarks for any coord. + Coord to be specified in a subclass. + + ASV will run the benchmarks within this class for any subclasses. + + Should only be instantiated within subclasses, but cannot enforce this + since ASV cannot handle classes that include abstract methods. + """ + def setup(self): + """Prevent ASV instantiating (must therefore override setup() in any subclasses.)""" + raise NotImplementedError + + def setup_common(self): + """Shared setup code that can be called by subclasses.""" + self.component = self.create() + + def time_create(self): + """Create an instance of the benchmarked coord. 
create method is + specified in the subclass.""" + self.create() + + def time_return(self): + """Return an instance of the benchmarked coord.""" + self.component + + +class DimCoord(CoordCommon): + def setup(self): + point_values = np.arange(ARTIFICIAL_DIM_SIZE) + bounds = np.array( + [point_values - 1, point_values + 1]).transpose() + + self.create_kwargs = { + "points": point_values, + "bounds": bounds, + "units": "days since 1970-01-01", + "climatological": True + } + + self.setup_common() + + def create(self): + return coords.DimCoord(**self.create_kwargs) + + def time_regular(self): + coords.DimCoord.from_regular(0, 1, 1000) + + +class AuxCoord(CoordCommon): + def setup(self): + bounds = np.array( + [data_1d - 1, data_1d + 1]).transpose() + + self.create_kwargs = { + "points": data_1d, + "bounds": bounds, + "units": "days since 1970-01-01", + "climatological": True + } + + self.setup_common() + + def create(self): + return coords.AuxCoord(**self.create_kwargs) + + +class CellMeasure(CoordCommon): + def setup(self): + self.setup_common() + + def create(self): + return coords.CellMeasure(data_1d) + + +class CellMethod(CoordCommon): + def setup(self): + self.setup_common() + + def create(self): + return coords.CellMethod("test") + + +class AncillaryVariable(CoordCommon): + def setup(self): + self.setup_common() + + def create(self): + return coords.AncillaryVariable(data_1d) \ No newline at end of file diff --git a/benchmarks/benchmarks/cube.py b/benchmarks/benchmarks/cube.py new file mode 100644 index 0000000000..0101761c88 --- /dev/null +++ b/benchmarks/benchmarks/cube.py @@ -0,0 +1,226 @@ +""" +Cube benchmark tests. + +""" + +import numpy as np + +from benchmarks import ARTIFICIAL_DIM_SIZE +from iris import analysis, aux_factory, coords, cube + + +def setup(): + """General variables needed by multiple benchmark classes.""" + global data_1d + global data_2d + global general_cube + + data_2d = np.zeros((ARTIFICIAL_DIM_SIZE,) * 2) + data_1d = data_2d[0] + general_cube = cube.Cube(data_2d) + + +class ComponentCommon: + # TODO: once https://github.com/airspeed-velocity/asv/pull/828 is released: + # * make class an ABC + # * remove NotImplementedError + # * combine setup_common into setup + + """ + A base class running a generalised suite of benchmarks for cubes that + include a specified component (e.g. Coord, CellMeasure etc.). Component to + be specified in a subclass. + + ASV will run the benchmarks within this class for any subclasses. + + Should only be instantiated within subclasses, but cannot enforce this + since ASV cannot handle classes that include abstract methods. + """ + + def setup(self): + """Prevent ASV instantiating (must therefore override setup() in any subclasses.)""" + raise NotImplementedError + + def create(self): + """Generic cube creation. cube_kwargs allow dynamic inclusion of + different components; specified in subclasses.""" + return cube.Cube(data=data_2d, **self.cube_kwargs) + + def setup_common(self): + """Shared setup code that can be called by subclasses.""" + self.cube = self.create() + + def time_create(self): + """Create a cube that includes an instance of the benchmarked component.""" + self.create() + + def time_add(self): + """Add an instance of the benchmarked component to an existing cube.""" + # Unable to create the copy during setup since this needs to be re-done + # for every repeat of the test (some components disallow duplicates). 
+ general_cube_copy = general_cube.copy(data=data_2d) + self.add_method(general_cube_copy, *self.add_args) + + def time_return(self): + """Return a cube that includes an instance of the benchmarked component.""" + self.cube + + +class Cube: + def time_basic(self): + cube.Cube(data_2d) + + def time_rename(self): + general_cube.name = "air_temperature" + + +class AuxCoord(ComponentCommon): + def setup(self): + self.coord_name = "test" + coord_bounds = np.array( + [data_1d - 1, data_1d + 1]).transpose() + aux_coord = coords.AuxCoord(long_name=self.coord_name, + points=data_1d, + bounds=coord_bounds, + units="days since 1970-01-01", + climatological=True) + + # Variables needed by the ComponentCommon base class. + self.cube_kwargs = { + "aux_coords_and_dims": [(aux_coord, 0)] + } + self.add_method = cube.Cube.add_aux_coord + self.add_args = (aux_coord, (0)) + + self.setup_common() + + def time_return_coords(self): + self.cube.coords() + + def time_return_coord_dims(self): + self.cube.coord_dims(self.coord_name) + + +class AuxFactory(ComponentCommon): + def setup(self): + coord = coords.AuxCoord(points=data_1d, units="m") + self.hybrid_factory = aux_factory.HybridHeightFactory(delta=coord) + + # Variables needed by the ComponentCommon base class. + self.cube_kwargs = { + "aux_coords_and_dims": [(coord, 0)], + "aux_factories": [self.hybrid_factory] + } + + self.setup_common() + + # Variables needed by the overridden time_add benchmark in this subclass. + cube_w_coord = self.cube.copy() + [cube_w_coord.remove_aux_factory(i) for i in cube_w_coord.aux_factories] + self.cube_w_coord = cube_w_coord + + def time_add(self): + # Requires override from super().time_add because the cube needs an + # additional coord. + self.cube_w_coord.add_aux_factory(self.hybrid_factory) + + +class CellMeasure(ComponentCommon): + def setup(self): + cell_measure = coords.CellMeasure(data_1d) + + # Variables needed by the ComponentCommon base class. + self.cube_kwargs = { + "cell_measures_and_dims": [(cell_measure, 0)] + } + self.add_method = cube.Cube.add_cell_measure + self.add_args = (cell_measure, 0) + + self.setup_common() + + +class CellMethod(ComponentCommon): + def setup(self): + cell_method = coords.CellMethod("test") + + # Variables needed by the ComponentCommon base class. + self.cube_kwargs = { + "cell_methods": [cell_method] + } + self.add_method = cube.Cube.add_cell_method + self.add_args = [cell_method] + + self.setup_common() + + +class AncillaryVariable(ComponentCommon): + def setup(self): + ancillary_variable = coords.AncillaryVariable(data_1d) + + # Variables needed by the ComponentCommon base class. 
+ self.cube_kwargs = { + "ancillary_variables_and_dims": [(ancillary_variable, 0)] + } + self.add_method = cube.Cube.add_ancillary_variable + self.add_args = (ancillary_variable, 0) + + self.setup_common() + + +class Merge: + def setup(self): + self.cube_list = cube.CubeList() + for i in np.arange(2): + i_cube = general_cube.copy() + i_coord = coords.AuxCoord([i]) + i_cube.add_aux_coord(i_coord) + self.cube_list.append(i_cube) + + def time_merge(self): + self.cube_list.merge() + + +class Concatenate: + def setup(self): + dim_size = ARTIFICIAL_DIM_SIZE + self.cube_list = cube.CubeList() + for i in np.arange(dim_size * 2, step=dim_size): + i_cube = general_cube.copy() + i_coord = coords.DimCoord(np.arange(dim_size) + (i * dim_size)) + i_cube.add_dim_coord(i_coord, 0) + self.cube_list.append(i_cube) + + def time_concatenate(self): + self.cube_list.concatenate() + + +class Equality: + def setup(self): + self.cube_a = general_cube.copy() + self.cube_b = general_cube.copy() + + aux_coord = coords.AuxCoord(data_1d) + self.cube_a.add_aux_coord(aux_coord, 0) + self.cube_b.add_aux_coord(aux_coord, 1) + + def time_equality(self): + self.cube_a == self.cube_b + + +class Aggregation: + def setup(self): + repeat_number = 10 + repeat_range = range(int(ARTIFICIAL_DIM_SIZE / repeat_number)) + array_repeat = np.repeat(repeat_range, repeat_number) + array_unique = np.arange(len(array_repeat)) + + coord_repeat = coords.AuxCoord(points=array_repeat, long_name="repeat") + coord_unique = coords.DimCoord(points=array_unique, long_name="unique") + + local_cube = general_cube.copy() + local_cube.add_aux_coord(coord_repeat, 0) + local_cube.add_dim_coord(coord_unique, 0) + self.cube = local_cube + + def time_aggregated_by(self): + self.cube.aggregated_by("repeat", analysis.MEAN) diff --git a/benchmarks/benchmarks/import_iris.py b/benchmarks/benchmarks/import_iris.py new file mode 100644 index 0000000000..3b12f3fb14 --- /dev/null +++ b/benchmarks/benchmarks/import_iris.py @@ -0,0 +1,237 @@ +import sys + + +class Iris: + warmup_time = 0 + number = 1 + repeat = 10 + + def setup(self): + self.before = set(sys.modules.keys()) + + def teardown(self): + after = set(sys.modules.keys()) + diff = after - self.before + for module in diff: + sys.modules.pop(module) + + def time_iris(self): + import iris + + def time__concatenate(self): + import iris._concatenate + + def time__constraints(self): + import iris._constraints + + def time__data_manager(self): + import iris._data_manager + + def time__deprecation(self): + import iris._deprecation + + def time__lazy_data(self): + import iris._lazy_data + + def time__merge(self): + import iris._merge + + def time__representation(self): + import iris._representation + + def time_analysis(self): + import iris.analysis + + def time_analysis__area_weighted(self): + import iris.analysis._area_weighted + + def time_analysis__grid_angles(self): + import iris.analysis._grid_angles + + def time_analysis__interpolation(self): + import iris.analysis._interpolation + + def time_analysis__regrid(self): + import iris.analysis._regrid + + def time_analysis__scipy_interpolate(self): + import iris.analysis._scipy_interpolate + + def time_analysis_calculus(self): + import iris.analysis.calculus + + def time_analysis_cartography(self): + import iris.analysis.cartography + + def time_analysis_geomerty(self): + import iris.analysis.geometry + + def time_analysis_maths(self): + import iris.analysis.maths + + def time_analysis_stats(self): + import iris.analysis.stats + + def time_analysis_trajectory(self): + 
import iris.analysis.trajectory + + def time_aux_factory(self): + import iris.aux_factory + + def time_common(self): + import iris.common + + def time_common_lenient(self): + import iris.common.lenient + + def time_common_metadata(self): + import iris.common.metadata + + def time_common_mixin(self): + import iris.common.mixin + + def time_common_resolve(self): + import iris.common.resolve + + def time_config(self): + import iris.config + + def time_coord_categorisation(self): + import iris.coord_categorisation + + def time_coord_systems(self): + import iris.coord_systems + + def time_coords(self): + import iris.coords + + def time_cube(self): + import iris.cube + + def time_exceptions(self): + import iris.exceptions + + def time_experimental(self): + import iris.experimental + + def time_fileformats(self): + import iris.fileformats + + def time_fileformats__ff(self): + import iris.fileformats._ff + + def time_fileformats__ff_cross_references(self): + import iris.fileformats._ff_cross_references + + def time_fileformats__pp_lbproc_pairs(self): + import iris.fileformats._pp_lbproc_pairs + + def time_fileformats_structured_array_identification(self): + import iris.fileformats._structured_array_identification + + def time_fileformats_abf(self): + import iris.fileformats.abf + + def time_fileformats_cf(self): + import iris.fileformats.cf + + def time_fileformats_dot(self): + import iris.fileformats.dot + + def time_fileformats_name(self): + import iris.fileformats.name + + def time_fileformats_name_loaders(self): + import iris.fileformats.name_loaders + + def time_fileformats_netcdf(self): + import iris.fileformats.netcdf + + def time_fileformats_nimrod(self): + import iris.fileformats.nimrod + + def time_fileformats_nimrod_load_rules(self): + import iris.fileformats.nimrod_load_rules + + def time_fileformats_pp(self): + import iris.fileformats.pp + + def time_fileformats_pp_load_rules(self): + import iris.fileformats.pp_load_rules + + def time_fileformats_pp_save_rules(self): + import iris.fileformats.pp_save_rules + + def time_fileformats_rules(self): + import iris.fileformats.rules + + def time_fileformats_um(self): + import iris.fileformats.um + + def time_fileformats_um__fast_load(self): + import iris.fileformats.um._fast_load + + def time_fileformats_um__fast_load_structured_fields(self): + import iris.fileformats.um._fast_load_structured_fields + + def time_fileformats_um__ff_replacement(self): + import iris.fileformats.um._ff_replacement + + def time_fileformats_um__optimal_array_structuring(self): + import iris.fileformats.um._optimal_array_structuring + + def time_fileformats_um_cf_map(self): + import iris.fileformats.um_cf_map + + def time_io(self): + import iris.io + + def time_io_format_picker(self): + import iris.io.format_picker + + def time_iterate(self): + import iris.iterate + + def time_palette(self): + import iris.palette + + def time_plot(self): + import iris.plot + + def time_quickplot(self): + import iris.quickplot + + def time_std_names(self): + import iris.std_names + + def time_symbols(self): + import iris.symbols + + def time_tests(self): + import iris.tests + + def time_time(self): + import iris.time + + def time_util(self): + import iris.util + +# third-party imports + + def time_third_party_cartopy(self): + import cartopy + + def time_third_party_cf_units(self): + import cf_units + + def time_third_party_cftime(self): + import cftime + + def time_third_party_matplotlib(self): + import matplotlib + + def time_third_party_numpy(self): + import numpy + + def 
time_third_party_scipy(self): + import scipy diff --git a/benchmarks/benchmarks/iterate.py b/benchmarks/benchmarks/iterate.py new file mode 100644 index 0000000000..590dfd94ae --- /dev/null +++ b/benchmarks/benchmarks/iterate.py @@ -0,0 +1,34 @@ +""" +Iterate benchmark tests. + +""" +import numpy as np + +from benchmarks import ARTIFICIAL_DIM_SIZE +from iris import coords, cube, iterate + + +def setup(): + """General variables needed by multiple benchmark classes.""" + global data_1d + global data_2d + global general_cube + + data_2d = np.zeros((ARTIFICIAL_DIM_SIZE,) * 2) + data_1d = data_2d[0] + general_cube = cube.Cube(data_2d) + + +class IZip: + def setup(self): + local_cube = general_cube.copy() + coord_a = coords.AuxCoord(points=data_1d, long_name="a") + coord_b = coords.AuxCoord(points=data_1d, long_name="b") + self.coord_names = (coord.long_name for coord in (coord_a, coord_b)) + + local_cube.add_aux_coord(coord_a, 0) + local_cube.add_aux_coord(coord_b, 1) + self.cube = local_cube + + def time_izip(self): + iterate.izip(self.cube, coords=self.coord_names) diff --git a/benchmarks/benchmarks/metadata_manager_factory.py b/benchmarks/benchmarks/metadata_manager_factory.py new file mode 100644 index 0000000000..43c7b19255 --- /dev/null +++ b/benchmarks/benchmarks/metadata_manager_factory.py @@ -0,0 +1,75 @@ +""" +metadata_manager_factory benchmark tests. + +""" + +from iris.common import metadata_manager_factory, AncillaryVariableMetadata, BaseMetadata, CellMeasureMetadata, CoordMetadata, CubeMetadata, DimCoordMetadata + + +class MetadataManagerFactory__create: + params = [1, 10, 100] + + def time_AncillaryVariableMetadata(self, n): + [metadata_manager_factory(AncillaryVariableMetadata) for _ in range(n)] + + def time_BaseMetadata(self, n): + [metadata_manager_factory(BaseMetadata) for _ in range(n)] + + def time_CellMeasureMetadata(self, n): + [metadata_manager_factory(CellMeasureMetadata) for _ in range(n)] + + def time_CoordMetadata(self, n): + [metadata_manager_factory(CoordMetadata) for _ in range(n)] + + def time_CubeMetadata(self, n): + [metadata_manager_factory(CubeMetadata) for _ in range(n)] + + def time_DimCoordMetadata(self, n): + [metadata_manager_factory(DimCoordMetadata) for _ in range(n)] + + +class MetadataManagerFactory: + def setup(self): + self.ancillary = metadata_manager_factory(AncillaryVariableMetadata) + self.base = metadata_manager_factory(BaseMetadata) + self.cell = metadata_manager_factory(CellMeasureMetadata) + self.coord = metadata_manager_factory(CoordMetadata) + self.cube = metadata_manager_factory(CubeMetadata) + self.dim = metadata_manager_factory(DimCoordMetadata) + + def time_AncillaryVariableMetadata_fields(self): + self.ancillary.fields + + def time_AncillaryVariableMetadata_values(self): + self.ancillary.values + + def time_BaseMetadata_fields(self): + self.base.fields + + def time_BaseMetadata_values(self): + self.base.values + + def time_CellMeasuresMetadata_fields(self): + self.cell.fields + + def time_CellMeasuresMetadata_values(self): + self.cell.values + + def time_CoordMetadata_fields(self): + self.coord.fields + + def time_CoordMetadata_values(self): + self.coord.values + + def time_CubeMetadata_fields(self): + self.cube.fields + + def time_CubeMetadata_values(self): + self.cube.values + + def time_DimCoordMetadata_fields(self): + self.dim.fields + + def time_DimCoordMetadata_values(self): + self.dim.values + diff --git a/benchmarks/benchmarks/mixin.py b/benchmarks/benchmarks/mixin.py new file mode 100644 index 0000000000..38f44175e5 --- 
/dev/null
+++ b/benchmarks/benchmarks/mixin.py
@@ -0,0 +1,79 @@
+"""
+Mixin benchmark tests.
+
+"""
+
+import numpy as np
+
+from benchmarks import ARTIFICIAL_DIM_SIZE
+from iris import coords
+from iris.common.metadata import AncillaryVariableMetadata
+
+
+LONG_NAME = "air temperature"
+STANDARD_NAME = "air_temperature"
+VAR_NAME = "air_temp"
+UNITS = "degrees"
+ATTRIBUTES = dict(a=1)
+DICT = dict(
+    standard_name=STANDARD_NAME,
+    long_name=LONG_NAME,
+    var_name=VAR_NAME,
+    units=UNITS,
+    attributes=ATTRIBUTES,
+)
+METADATA = AncillaryVariableMetadata(**DICT)
+TUPLE = tuple(DICT.values())
+
+
+class CFVariableMixin:
+    def setup(self):
+        data_1d = np.zeros(ARTIFICIAL_DIM_SIZE)
+
+        # These benchmarks are from a user perspective, so using a user-level
+        # subclass of CFVariableMixin to test behaviour. AncillaryVariable is
+        # the simplest, so using that.
+        self.cfm_proxy = coords.AncillaryVariable(data_1d)
+        self.cfm_proxy.long_name = "test"
+
+    def time_get_long_name(self):
+        self.cfm_proxy.long_name
+
+    def time_set_long_name(self):
+        self.cfm_proxy.long_name = LONG_NAME
+
+    def time_get_standard_name(self):
+        self.cfm_proxy.standard_name
+
+    def time_set_standard_name(self):
+        self.cfm_proxy.standard_name = STANDARD_NAME
+
+    def time_get_var_name(self):
+        self.cfm_proxy.var_name
+
+    def time_set_var_name(self):
+        self.cfm_proxy.var_name = VAR_NAME
+
+    def time_get_units(self):
+        self.cfm_proxy.units
+
+    def time_set_units(self):
+        self.cfm_proxy.units = UNITS
+
+    def time_get_attributes(self):
+        self.cfm_proxy.attributes
+
+    def time_set_attributes(self):
+        self.cfm_proxy.attributes = ATTRIBUTES
+
+    def time_get_metadata(self):
+        self.cfm_proxy.metadata
+
+    def time_set_metadata__dict(self):
+        self.cfm_proxy.metadata = DICT
+
+    def time_set_metadata__tuple(self):
+        self.cfm_proxy.metadata = TUPLE
+
+    def time_set_metadata__metadata(self):
+        self.cfm_proxy.metadata = METADATA
diff --git a/benchmarks/benchmarks/plot.py b/benchmarks/benchmarks/plot.py
new file mode 100644
index 0000000000..3169f3cf93
--- /dev/null
+++ b/benchmarks/benchmarks/plot.py
@@ -0,0 +1,32 @@
+"""
+Plot benchmark tests.
+
+"""
+import matplotlib
+import numpy as np
+
+from benchmarks import ARTIFICIAL_DIM_SIZE
+from iris import coords, cube, plot
+
+matplotlib.use("agg")
+
+class AuxSort:
+    def setup(self):
+        # Manufacture data from which contours can be derived.
+        # Should generate 10 distinct contours, regardless of dim size.
+        dim_size = int(ARTIFICIAL_DIM_SIZE / 5)
+        repeat_number = int(dim_size / 10)
+        repeat_range = range(int((dim_size ** 2) / repeat_number))
+        data = np.repeat(repeat_range, repeat_number)
+        data = data.reshape((dim_size,) * 2)
+
+        # These benchmarks are from a user perspective, so setting up a
+        # user-level case that will prompt the calling of aux_coords.sort in plot.py.
+        dim_coord = coords.DimCoord(np.arange(dim_size))
+        local_cube = cube.Cube(data)
+        local_cube.add_aux_coord(dim_coord, 0)
+        self.cube = local_cube
+
+    def time_aux_sort(self):
+        # Contour plot arbitrarily picked. Known to prompt aux_coords.sort.
+        plot.contour(self.cube)
diff --git a/benchmarks/conda_lock_plugin.py b/benchmarks/conda_lock_plugin.py
new file mode 100644
index 0000000000..1107873c3e
--- /dev/null
+++ b/benchmarks/conda_lock_plugin.py
@@ -0,0 +1,48 @@
+"""
+ASV plug-in providing an alternative ``Environment`` subclass, which creates
+conda environments from a version-controlled lock file.
+ +""" +from asv.console import log +from asv.plugins.conda import Conda, _find_conda + +class CondaLock(Conda): + """ + Create the environment based on a **version-controlled** lockfile. + + Creating the environment instance is deferred until ``install_project`` time, + when the commit hash etc is known and we can access the lock file. + The environment is then overwritten by the specification provided at the + ``config.conda_lockfile`` path. ``conda.conda_lockfile`` must point to + an @EXPLICIT conda manifest, e.g. the output of either the ``conda-lock`` tool, + or ``conda list --explicit``. + """ + tool_name = "conda-lock" + + def __init__(self, conf, python, requirements): + self._lockfile_path = conf.conda_lockfile + super().__init__(conf, python, requirements) + + def _uninstall_project(self): + if self._get_installed_commit_hash(): + # we can only run the uninstall command if an environment has already + # been made before, otherwise there is no python to use to uninstall + super()._uninstall_project() + # TODO: we probably want to conda uninstall all the packages too + # something like: + # conda list --no-pip | sed /^#/d | cut -f 1 -d " " | xargs conda uninstall + + def _setup(self): + # create the shell of a conda environment, that includes no packages + log.info("Creating conda environment for {0}".format(self.name)) + self.run_executable(_find_conda(), ['create', "-y", '-p', self._path, '--force']) + + def _build_project(self, repo, commit_hash, build_dir): + # at "build" time, we build the environment from the provided lockfile + self.run_executable(_find_conda(), ["install", "-y", "-p", self._path, "--file", f"{build_dir}/{self._lockfile_path}"]) + # this is set to warning as the asv.commands.run._do_build function + # explicitly raises the log level to WARN, and I want to see the environment being updated + # in the stdout log. + log.warning(f"Environment {self.name} updated to spec at {commit_hash[:8]}") + log.debug(self.run_executable(_find_conda(), ["list", "-p", self._path])) + return super()._build_project(repo, commit_hash, build_dir) \ No newline at end of file diff --git a/lib/iris/cube.py b/lib/iris/cube.py index a15951900b..724b251620 100644 --- a/lib/iris/cube.py +++ b/lib/iris/cube.py @@ -888,6 +888,8 @@ def __init__( ... (longitude, 1)]) """ + # removed the sleep + # Temporary error while we transition the API. if isinstance(data, str): raise TypeError("Invalid data type: {!r}.".format(data))