From da7e32f616b99247a6d05f80b7f07bcd77235c4f Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 09:32:31 +0000 Subject: [PATCH 01/22] Simple conda-lock asv plugin --- asv.conf.json | 37 ------------------------- benchmarks/asv.conf.json | 17 ++++++++++++ benchmarks/benchmarks/__init__.py | 0 benchmarks/benchmarks/basic.py | 7 +++++ benchmarks/conda_lock_plugin.py | 46 +++++++++++++++++++++++++++++++ 5 files changed, 70 insertions(+), 37 deletions(-) delete mode 100644 asv.conf.json create mode 100644 benchmarks/asv.conf.json create mode 100644 benchmarks/benchmarks/__init__.py create mode 100644 benchmarks/benchmarks/basic.py create mode 100644 benchmarks/conda_lock_plugin.py diff --git a/asv.conf.json b/asv.conf.json deleted file mode 100644 index 46cd4839f2..0000000000 --- a/asv.conf.json +++ /dev/null @@ -1,37 +0,0 @@ -// See https://asv.readthedocs.io/en/stable/asv.conf.json.html for -// details on what can be included in this file. -{ - "version": 1, - "project": "scitools-iris", - "project_url": "https://github.com/SciTools/iris", - "repo": ".", - "environment_type": "conda", - "show_commit_url": "http://github.com/scitools/iris/commit/", - "conda_channels": ["conda-forge", "defaults"], - "matrix": { - "cartopy": [], - "proj4": ["5"], - "cf-units": [], - "cftime": [], - "dask": [], - "matplotlib": [], - "netcdf4": [], - "numpy": [], - "scipy": [], - - "setuptools": [], - "pyke": [], - "six": [], - - "nose": [], - "pep8": [], - "filelock": [], - "imagehash": [], - "requests": [], - }, - - "benchmark_dir": "lib/iris/tests/benchmarking", - "env_dir": ".asv/env", - "results_dir": ".asv/results", - "html_dir": ".asv/html", -} diff --git a/benchmarks/asv.conf.json b/benchmarks/asv.conf.json new file mode 100644 index 0000000000..c442fbefcc --- /dev/null +++ b/benchmarks/asv.conf.json @@ -0,0 +1,17 @@ +// See https://asv.readthedocs.io/en/stable/asv.conf.json.html for +// details on what can be included in this file. 
+{ + "version": 1, + "project": "scitools-iris", + "project_url": "https://github.com/SciTools/iris", + "repo": "..", + "environment_type": "conda-lock", + "show_commit_url": "http://github.com/scitools/iris/commit/", + + "benchmark_dir": "./benchmarks", + "env_dir": ".asv/env", + "results_dir": ".asv/results", + "html_dir": ".asv/html", + "plugins": [".conda_lock_plugin"], + "conda_lockfile": "../requirements/ci/nox.lock/py38-linux-64.lock" +} diff --git a/benchmarks/benchmarks/__init__.py b/benchmarks/benchmarks/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/benchmarks/benchmarks/basic.py b/benchmarks/benchmarks/basic.py new file mode 100644 index 0000000000..94dd21054c --- /dev/null +++ b/benchmarks/benchmarks/basic.py @@ -0,0 +1,7 @@ +def time_range(): + for i in range(1000): + pass + +def TimeRange(): + for i in range(1000): + pass \ No newline at end of file diff --git a/benchmarks/conda_lock_plugin.py b/benchmarks/conda_lock_plugin.py new file mode 100644 index 0000000000..5b4ce93d32 --- /dev/null +++ b/benchmarks/conda_lock_plugin.py @@ -0,0 +1,46 @@ +""" +ASV plug-in providing an alternative ``Environment`` subclass, which uses Nox +for environment management. + +""" +from asv.console import log +from asv.plugins.conda import Conda, _find_conda + +class CondaLock(Conda): + """ + Create the environment based on a **version-controlled** lockfile. + + Creating the environment instance is deferred until ``install_project`` time, + when the commit hash etc is known and we can access the lock file. + The environment is then overwritten by the specification provided at the + ``config.conda_lockfile`` path. ``conda.conda_lockfile`` must point to + an @EXPLICIT conda manifest, e.g. the output of either the ``conda-lock`` tool, + or ``conda list --explicit``. 
+ """ + tool_name = "conda-lock" + + def _uninstall_project(self): + if self._get_installed_commit_hash(): + # we can only run the uninstall command if an environment has already + # been made before, otherwise there is no python to use to uninstall + return super()._uninstall_project() + + def _setup(self): + # create the shell of a conda environment, that includes no packages + log.info("Creating conda environment for {0}".format(self.name)) + self.run_executable(_find_conda(), ['create', "-y", '-p', self._path, '--force']) + + def _build_project(self, repo, commit_hash, build_dir): + # at "build" time, we build the environment from the provided lockfile + self.run_executable(_find_conda(), ["install", "-y", "-p", self._path, "--file", f"{build_dir}/requirements/ci/nox.lock/py38-linux-64.lock"]) + # this is set to warning as the asv.commands.run._do_build function + # explicitly raises the log level to WARN, and I want to see the environment being updated + # in the stdout log. + log.warning(f"Environment {self.name} updated to spec at {commit_hash[:8]}") + log.debug(self.run_executable(_find_conda(), ["list", "-p", self._path])) + # self._build_command = "" + # return super()._build_project(repo, commit_hash, build_dir) + + def _install_project(self, repo, commit_hash, build_dir): + self._install_command = "pip install --no-deps --editable {build_dir}" + return super()._install_project(repo, commit_hash, build_dir) \ No newline at end of file From 09852ac17874026a6c0deb9361c39e5fd93c37e1 Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 10:03:31 +0000 Subject: [PATCH 02/22] Added existing benchmarks. 
Did *not* add existing benchmarks that depend on external data files --- benchmarks/asv.conf.json | 6 +- benchmarks/benchmarks/__init__.py | 40 +++ benchmarks/benchmarks/aux_factory.py | 53 ++++ benchmarks/benchmarks/coords.py | 113 +++++++++ benchmarks/benchmarks/cube.py | 226 +++++++++++++++++ benchmarks/benchmarks/import_iris.py | 237 ++++++++++++++++++ benchmarks/benchmarks/iterate.py | 34 +++ .../benchmarks/metadata_manager_factory.py | 75 ++++++ benchmarks/benchmarks/mixin.py | 79 ++++++ benchmarks/benchmarks/plot.py | 32 +++ benchmarks/conda_lock_plugin.py | 9 +- 11 files changed, 900 insertions(+), 4 deletions(-) create mode 100644 benchmarks/benchmarks/aux_factory.py create mode 100644 benchmarks/benchmarks/coords.py create mode 100644 benchmarks/benchmarks/cube.py create mode 100644 benchmarks/benchmarks/import_iris.py create mode 100644 benchmarks/benchmarks/iterate.py create mode 100644 benchmarks/benchmarks/metadata_manager_factory.py create mode 100644 benchmarks/benchmarks/mixin.py create mode 100644 benchmarks/benchmarks/plot.py diff --git a/benchmarks/asv.conf.json b/benchmarks/asv.conf.json index c442fbefcc..46ee5aa9e9 100644 --- a/benchmarks/asv.conf.json +++ b/benchmarks/asv.conf.json @@ -1,5 +1,3 @@ -// See https://asv.readthedocs.io/en/stable/asv.conf.json.html for -// details on what can be included in this file. 
{ "version": 1, "project": "scitools-iris", @@ -13,5 +11,7 @@ "results_dir": ".asv/results", "html_dir": ".asv/html", "plugins": [".conda_lock_plugin"], - "conda_lockfile": "../requirements/ci/nox.lock/py38-linux-64.lock" + // this is not an asv standard config entry, just for our plugin + // path to lockfile, relative to project base + "conda_lockfile": "requirements/ci/nox.lock/py38-linux-64.lock" } diff --git a/benchmarks/benchmarks/__init__.py b/benchmarks/benchmarks/__init__.py index e69de29bb2..0247578eae 100644 --- a/benchmarks/benchmarks/__init__.py +++ b/benchmarks/benchmarks/__init__.py @@ -0,0 +1,40 @@ +"""Common code for benchmarks.""" + +import os +from pathlib import Path + +# Environment variable names +_ASVDIR_VARNAME = 'ASV_DIR' # As set in nightly script "asv_nightly/asv.sh" +_DATADIR_VARNAME = 'BENCHMARK_DATA' # For local runs + +ARTIFICIAL_DIM_SIZE = int(10e3) # For all artificial cubes, coords etc. + +# Work out where the benchmark data dir is. +asv_dir = os.environ.get('ASV_DIR', None) +if asv_dir: + # For an overnight run, this comes from the 'ASV_DIR' setting. + benchmark_data_dir = Path(asv_dir) / 'data' +else: + # For a local run, you set 'BENCHMARK_DATA'. + benchmark_data_dir = os.environ.get(_DATADIR_VARNAME, None) + if benchmark_data_dir is not None: + benchmark_data_dir = Path(benchmark_data_dir) + + +def testdata_path(*path_names): + """ + Return the path of a benchmark test data file. + + These are based from a test-data location dir, which is either + ${}/data (for overnight tests), or ${} for local testing. + + If neither of these were set, an error is raised. + + """.format(_ASVDIR_VARNAME, _DATADIR_VARNAME) + if benchmark_data_dir is None: + msg = ('Benchmark data dir is not defined : ' + 'Either "${}" or "${}" must be set.') + raise(ValueError(msg.format(_ASVDIR_VARNAME, _DATADIR_VARNAME))) + path = benchmark_data_dir.joinpath(*path_names) + path = str(path) # Because Iris doesn't understand Path objects yet. 
+ return path \ No newline at end of file diff --git a/benchmarks/benchmarks/aux_factory.py b/benchmarks/benchmarks/aux_factory.py new file mode 100644 index 0000000000..b9cf540377 --- /dev/null +++ b/benchmarks/benchmarks/aux_factory.py @@ -0,0 +1,53 @@ +""" +AuxFactory benchmark tests. + +""" + +import numpy as np + +from benchmarks import ARTIFICIAL_DIM_SIZE +from iris import aux_factory, coords + + +class FactoryCommon: + # TODO: once https://github.com/airspeed-velocity/asv/pull/828 is released: + # * make class an ABC + # * remove NotImplementedError + # * combine setup_common into setup + + """ + A base class running a generalised suite of benchmarks for any factory. + Factory to be specified in a subclass. + + ASV will run the benchmarks within this class for any subclasses. + + Should only be instantiated within subclasses, but cannot enforce this + since ASV cannot handle classes that include abstract methods. + """ + def setup(self): + """Prevent ASV instantiating (must therefore override setup() in any subclasses.)""" + raise NotImplementedError + + def setup_common(self): + """Shared setup code that can be called by subclasses.""" + self.factory = self.create() + + def time_create(self): + """Create an instance of the benchmarked factory. create method is + specified in the subclass.""" + self.create() + + def time_return(self): + """Return an instance of the benchmarked factory.""" + self.factory + + +class HybridHeightFactory(FactoryCommon): + def setup(self): + data_1d = np.zeros(ARTIFICIAL_DIM_SIZE) + self.coord = coords.AuxCoord(points=data_1d, units="m") + + self.setup_common() + + def create(self): + return aux_factory.HybridHeightFactory(delta=self.coord) diff --git a/benchmarks/benchmarks/coords.py b/benchmarks/benchmarks/coords.py new file mode 100644 index 0000000000..59b70d9d2c --- /dev/null +++ b/benchmarks/benchmarks/coords.py @@ -0,0 +1,113 @@ +""" +Coord benchmark tests. 
+ +""" + +import numpy as np + +from benchmarks import ARTIFICIAL_DIM_SIZE +from iris import coords + + +def setup(): + """General variables needed by multiple benchmark classes.""" + global data_1d + + data_1d = np.zeros(ARTIFICIAL_DIM_SIZE) + + +class CoordCommon: + # TODO: once https://github.com/airspeed-velocity/asv/pull/828 is released: + # * make class an ABC + # * remove NotImplementedError + # * combine setup_common into setup + """ + + A base class running a generalised suite of benchmarks for any coord. + Coord to be specified in a subclass. + + ASV will run the benchmarks within this class for any subclasses. + + Should only be instantiated within subclasses, but cannot enforce this + since ASV cannot handle classes that include abstract methods. + """ + def setup(self): + """Prevent ASV instantiating (must therefore override setup() in any subclasses.)""" + raise NotImplementedError + + def setup_common(self): + """Shared setup code that can be called by subclasses.""" + self.component = self.create() + + def time_create(self): + """Create an instance of the benchmarked coord. 
create method is + specified in the subclass.""" + self.create() + + def time_return(self): + """Return an instance of the benchmarked coord.""" + self.component + + +class DimCoord(CoordCommon): + def setup(self): + point_values = np.arange(ARTIFICIAL_DIM_SIZE) + bounds = np.array( + [point_values - 1, point_values + 1]).transpose() + + self.create_kwargs = { + "points": point_values, + "bounds": bounds, + "units": "days since 1970-01-01", + "climatological": True + } + + self.setup_common() + + def create(self): + return coords.DimCoord(**self.create_kwargs) + + def time_regular(self): + coords.DimCoord.from_regular(0, 1, 1000) + + +class AuxCoord(CoordCommon): + def setup(self): + bounds = np.array( + [data_1d - 1, data_1d + 1]).transpose() + + self.create_kwargs = { + "points": data_1d, + "bounds": bounds, + "units": "days since 1970-01-01", + "climatological": True + } + + self.setup_common() + + def create(self): + return coords.AuxCoord(**self.create_kwargs) + + +class CellMeasure(CoordCommon): + def setup(self): + self.setup_common() + + def create(self): + return coords.CellMeasure(data_1d) + + +class CellMethod(CoordCommon): + def setup(self): + self.setup_common() + + def create(self): + return coords.CellMethod("test") + + +class AncillaryVariable(CoordCommon): + def setup(self): + self.setup_common() + + def create(self): + return coords.AncillaryVariable(data_1d) \ No newline at end of file diff --git a/benchmarks/benchmarks/cube.py b/benchmarks/benchmarks/cube.py new file mode 100644 index 0000000000..0101761c88 --- /dev/null +++ b/benchmarks/benchmarks/cube.py @@ -0,0 +1,226 @@ +""" +Cube benchmark tests. 
+ +""" + +import numpy as np + +from benchmarks import ARTIFICIAL_DIM_SIZE +from iris import analysis, aux_factory, coords, cube + + +def setup(): + """General variables needed by multiple benchmark classes.""" + global data_1d + global data_2d + global general_cube + + data_2d = np.zeros((ARTIFICIAL_DIM_SIZE,) * 2) + data_1d = data_2d[0] + general_cube = cube.Cube(data_2d) + + +class ComponentCommon: + # TODO: once https://github.com/airspeed-velocity/asv/pull/828 is released: + # * make class an ABC + # * remove NotImplementedError + # * combine setup_common into setup + + """ + A base class running a generalised suite of benchmarks for cubes that + include a specified component (e.g. Coord, CellMeasure etc.). Component to + be specified in a subclass. + + ASV will run the benchmarks within this class for any subclasses. + + Should only be instantiated within subclasses, but cannot enforce this + since ASV cannot handle classes that include abstract methods. + """ + + def setup(self): + """Prevent ASV instantiating (must therefore override setup() in any subclasses.)""" + raise NotImplementedError + + def create(self): + """Generic cube creation. cube_kwargs allow dynamic inclusion of + different components; specified in subclasses.""" + return cube.Cube(data=data_2d, **self.cube_kwargs) + + def setup_common(self): + """Shared setup code that can be called by subclasses.""" + self.cube = self.create() + + def time_create(self): + """Create a cube that includes an instance of the benchmarked component.""" + self.create() + + def time_add(self): + """Add an instance of the benchmarked component to an existing cube.""" + # Unable to create the copy during setup since this needs to be re-done + # for every repeat of the test (some components disallow duplicates). 
+ general_cube_copy = general_cube.copy(data=data_2d) + self.add_method(general_cube_copy, *self.add_args) + + def time_return(self): + """Return a cube that includes an instance of the benchmarked component.""" + self.cube + + +class Cube: + def time_basic(self): + cube.Cube(data_2d) + + def time_rename(self): + general_cube.name = "air_temperature" + + +class AuxCoord(ComponentCommon): + def setup(self): + self.coord_name = "test" + coord_bounds = np.array( + [data_1d - 1, data_1d + 1]).transpose() + aux_coord = coords.AuxCoord(long_name=self.coord_name, + points=data_1d, + bounds=coord_bounds, + units="days since 1970-01-01", + climatological=True) + + # Variables needed by the ComponentCommon base class. + self.cube_kwargs = { + "aux_coords_and_dims": [(aux_coord, 0)] + } + self.add_method = cube.Cube.add_aux_coord + self.add_args = (aux_coord, (0)) + + self.setup_common() + + def time_return_coords(self): + self.cube.coords() + + def time_return_coord_dims(self): + self.cube.coord_dims(self.coord_name) + + +class AuxFactory(ComponentCommon): + def setup(self): + coord = coords.AuxCoord(points=data_1d, units="m") + self.hybrid_factory = aux_factory.HybridHeightFactory(delta=coord) + + # Variables needed by the ComponentCommon base class. + self.cube_kwargs = { + "aux_coords_and_dims": [(coord, 0)], + "aux_factories": [self.hybrid_factory] + } + + self.setup_common() + + # Variables needed by the overridden time_add benchmark in this subclass. + cube_w_coord = self.cube.copy() + [cube_w_coord.remove_aux_factory(i) for i in cube_w_coord.aux_factories] + self.cube_w_coord = cube_w_coord + + def time_add(self): + # Requires override from super().time_add because the cube needs an + # additional coord. + self.cube_w_coord.add_aux_factory(self.hybrid_factory) + + +class CellMeasure(ComponentCommon): + def setup(self): + cell_measure = coords.CellMeasure(data_1d) + + # Variables needed by the ComponentCommon base class. 
+ self.cube_kwargs = { + "cell_measures_and_dims": [(cell_measure, 0)] + } + self.add_method = cube.Cube.add_cell_measure + self.add_args = (cell_measure, 0) + + self.setup_common() + + +class CellMethod(ComponentCommon): + def setup(self): + cell_method = coords.CellMethod("test") + + # Variables needed by the ComponentCommon base class. + self.cube_kwargs = { + "cell_methods": [cell_method] + } + self.add_method = cube.Cube.add_cell_method + self.add_args = [cell_method] + + self.setup_common() + + +class AncillaryVariable(ComponentCommon): + def setup(self): + ancillary_variable = coords.AncillaryVariable(data_1d) + + # Variables needed by the ComponentCommon base class. + self.cube_kwargs = { + "ancillary_variables_and_dims": [(ancillary_variable, 0)] + } + self.add_method = cube.Cube.add_ancillary_variable + self.add_args = (ancillary_variable, 0) + + self.setup_common() + + +class Merge: + def setup(self): + self.cube_list = cube.CubeList() + for i in np.arange(2): + i_cube = general_cube.copy() + i_coord = coords.AuxCoord([i]) + i_cube.add_aux_coord(i_coord) + self.cube_list.append(i_cube) + + def time_merge(self): + self.cube_list.merge() + + +class Concatenate: + def setup(self): + dim_size = ARTIFICIAL_DIM_SIZE + self.cube_list = cube.CubeList() + for i in np.arange(dim_size * 2, step=dim_size): + i_cube = general_cube.copy() + i_coord = coords.DimCoord(np.arange(dim_size) + (i * dim_size)) + i_cube.add_dim_coord(i_coord, 0) + self.cube_list.append(i_cube) + + def time_concatenate(self): + self.cube_list.concatenate() + + +class Equality: + def setup(self): + self.cube_a = general_cube.copy() + self.cube_b = general_cube.copy() + + aux_coord = coords.AuxCoord(data_1d) + self.cube_a.add_aux_coord(aux_coord, 0) + self.cube_b.add_aux_coord(aux_coord, 1) + + def time_equality(self): + self.cube_a == self.cube_b + + +class Aggregation: + def setup(self): + repeat_number = 10 + repeat_range = range(int(ARTIFICIAL_DIM_SIZE / repeat_number)) + array_repeat = 
np.repeat(repeat_range, repeat_number) + array_unique = np.arange(len(array_repeat)) + + coord_repeat = coords.AuxCoord(points=array_repeat, long_name="repeat") + coord_unique = coords.DimCoord(points=array_unique, long_name="unique") + + local_cube = general_cube.copy() + local_cube.add_aux_coord(coord_repeat, 0) + local_cube.add_dim_coord(coord_unique, 0) + self.cube = local_cube + + def time_aggregated_by(self): + self.cube.aggregated_by("repeat", analysis.MEAN) diff --git a/benchmarks/benchmarks/import_iris.py b/benchmarks/benchmarks/import_iris.py new file mode 100644 index 0000000000..3b12f3fb14 --- /dev/null +++ b/benchmarks/benchmarks/import_iris.py @@ -0,0 +1,237 @@ +import sys + + +class Iris: + warmup_time = 0 + number = 1 + repeat = 10 + + def setup(self): + self.before = set(sys.modules.keys()) + + def teardown(self): + after = set(sys.modules.keys()) + diff = after - self.before + for module in diff: + sys.modules.pop(module) + + def time_iris(self): + import iris + + def time__concatenate(self): + import iris._concatenate + + def time__constraints(self): + import iris._constraints + + def time__data_manager(self): + import iris._data_manager + + def time__deprecation(self): + import iris._deprecation + + def time__lazy_data(self): + import iris._lazy_data + + def time__merge(self): + import iris._merge + + def time__representation(self): + import iris._representation + + def time_analysis(self): + import iris.analysis + + def time_analysis__area_weighted(self): + import iris.analysis._area_weighted + + def time_analysis__grid_angles(self): + import iris.analysis._grid_angles + + def time_analysis__interpolation(self): + import iris.analysis._interpolation + + def time_analysis__regrid(self): + import iris.analysis._regrid + + def time_analysis__scipy_interpolate(self): + import iris.analysis._scipy_interpolate + + def time_analysis_calculus(self): + import iris.analysis.calculus + + def time_analysis_cartography(self): + import 
iris.analysis.cartography + + def time_analysis_geomerty(self): + import iris.analysis.geometry + + def time_analysis_maths(self): + import iris.analysis.maths + + def time_analysis_stats(self): + import iris.analysis.stats + + def time_analysis_trajectory(self): + import iris.analysis.trajectory + + def time_aux_factory(self): + import iris.aux_factory + + def time_common(self): + import iris.common + + def time_common_lenient(self): + import iris.common.lenient + + def time_common_metadata(self): + import iris.common.metadata + + def time_common_mixin(self): + import iris.common.mixin + + def time_common_resolve(self): + import iris.common.resolve + + def time_config(self): + import iris.config + + def time_coord_categorisation(self): + import iris.coord_categorisation + + def time_coord_systems(self): + import iris.coord_systems + + def time_coords(self): + import iris.coords + + def time_cube(self): + import iris.cube + + def time_exceptions(self): + import iris.exceptions + + def time_experimental(self): + import iris.experimental + + def time_fileformats(self): + import iris.fileformats + + def time_fileformats__ff(self): + import iris.fileformats._ff + + def time_fileformats__ff_cross_references(self): + import iris.fileformats._ff_cross_references + + def time_fileformats__pp_lbproc_pairs(self): + import iris.fileformats._pp_lbproc_pairs + + def time_fileformats_structured_array_identification(self): + import iris.fileformats._structured_array_identification + + def time_fileformats_abf(self): + import iris.fileformats.abf + + def time_fileformats_cf(self): + import iris.fileformats.cf + + def time_fileformats_dot(self): + import iris.fileformats.dot + + def time_fileformats_name(self): + import iris.fileformats.name + + def time_fileformats_name_loaders(self): + import iris.fileformats.name_loaders + + def time_fileformats_netcdf(self): + import iris.fileformats.netcdf + + def time_fileformats_nimrod(self): + import iris.fileformats.nimrod + + def 
time_fileformats_nimrod_load_rules(self): + import iris.fileformats.nimrod_load_rules + + def time_fileformats_pp(self): + import iris.fileformats.pp + + def time_fileformats_pp_load_rules(self): + import iris.fileformats.pp_load_rules + + def time_fileformats_pp_save_rules(self): + import iris.fileformats.pp_save_rules + + def time_fileformats_rules(self): + import iris.fileformats.rules + + def time_fileformats_um(self): + import iris.fileformats.um + + def time_fileformats_um__fast_load(self): + import iris.fileformats.um._fast_load + + def time_fileformats_um__fast_load_structured_fields(self): + import iris.fileformats.um._fast_load_structured_fields + + def time_fileformats_um__ff_replacement(self): + import iris.fileformats.um._ff_replacement + + def time_fileformats_um__optimal_array_structuring(self): + import iris.fileformats.um._optimal_array_structuring + + def time_fileformats_um_cf_map(self): + import iris.fileformats.um_cf_map + + def time_io(self): + import iris.io + + def time_io_format_picker(self): + import iris.io.format_picker + + def time_iterate(self): + import iris.iterate + + def time_palette(self): + import iris.palette + + def time_plot(self): + import iris.plot + + def time_quickplot(self): + import iris.quickplot + + def time_std_names(self): + import iris.std_names + + def time_symbols(self): + import iris.symbols + + def time_tests(self): + import iris.tests + + def time_time(self): + import iris.time + + def time_util(self): + import iris.util + +# third-party imports + + def time_third_party_cartopy(self): + import cartopy + + def time_third_party_cf_units(self): + import cf_units + + def time_third_party_cftime(self): + import cftime + + def time_third_party_matplotlib(self): + import matplotlib + + def time_third_party_numpy(self): + import numpy + + def time_third_party_scipy(self): + import scipy diff --git a/benchmarks/benchmarks/iterate.py b/benchmarks/benchmarks/iterate.py new file mode 100644 index 0000000000..590dfd94ae --- 
/dev/null +++ b/benchmarks/benchmarks/iterate.py @@ -0,0 +1,34 @@ +""" +Iterate benchmark tests. + +""" +import numpy as np + +from benchmarks import ARTIFICIAL_DIM_SIZE +from iris import coords, cube, iterate + + +def setup(): + """General variables needed by multiple benchmark classes.""" + global data_1d + global data_2d + global general_cube + + data_2d = np.zeros((ARTIFICIAL_DIM_SIZE,) * 2) + data_1d = data_2d[0] + general_cube = cube.Cube(data_2d) + + +class IZip: + def setup(self): + local_cube = general_cube.copy() + coord_a = coords.AuxCoord(points=data_1d, long_name="a") + coord_b = coords.AuxCoord(points=data_1d, long_name="b") + self.coord_names = (coord.long_name for coord in (coord_a, coord_b)) + + local_cube.add_aux_coord(coord_a, 0) + local_cube.add_aux_coord(coord_b, 1) + self.cube = local_cube + + def time_izip(self): + iterate.izip(self.cube, coords=self.coord_names) diff --git a/benchmarks/benchmarks/metadata_manager_factory.py b/benchmarks/benchmarks/metadata_manager_factory.py new file mode 100644 index 0000000000..43c7b19255 --- /dev/null +++ b/benchmarks/benchmarks/metadata_manager_factory.py @@ -0,0 +1,75 @@ +""" +metadata_manager_factory benchmark tests. 
+ +""" + +from iris.common import metadata_manager_factory, AncillaryVariableMetadata, BaseMetadata, CellMeasureMetadata, CoordMetadata, CubeMetadata, DimCoordMetadata + + +class MetadataManagerFactory__create: + params = [1, 10, 100] + + def time_AncillaryVariableMetadata(self, n): + [metadata_manager_factory(AncillaryVariableMetadata) for _ in range(n)] + + def time_BaseMetadata(self, n): + [metadata_manager_factory(BaseMetadata) for _ in range(n)] + + def time_CellMeasureMetadata(self, n): + [metadata_manager_factory(CellMeasureMetadata) for _ in range(n)] + + def time_CoordMetadata(self, n): + [metadata_manager_factory(CoordMetadata) for _ in range(n)] + + def time_CubeMetadata(self, n): + [metadata_manager_factory(CubeMetadata) for _ in range(n)] + + def time_DimCoordMetadata(self, n): + [metadata_manager_factory(DimCoordMetadata) for _ in range(n)] + + +class MetadataManagerFactory: + def setup(self): + self.ancillary = metadata_manager_factory(AncillaryVariableMetadata) + self.base = metadata_manager_factory(BaseMetadata) + self.cell = metadata_manager_factory(CellMeasureMetadata) + self.coord = metadata_manager_factory(CoordMetadata) + self.cube = metadata_manager_factory(CubeMetadata) + self.dim = metadata_manager_factory(DimCoordMetadata) + + def time_AncillaryVariableMetadata_fields(self): + self.ancillary.fields + + def time_AncillaryVariableMetadata_values(self): + self.ancillary.values + + def time_BaseMetadata_fields(self): + self.base.fields + + def time_BaseMetadata_values(self): + self.base.values + + def time_CellMeasuresMetadata_fields(self): + self.cell.fields + + def time_CellMeasuresMetadata_values(self): + self.cell.values + + def time_CoordMetadata_fields(self): + self.coord.fields + + def time_CoordMetadata_values(self): + self.coord.values + + def time_CubeMetadata_fields(self): + self.cube.fields + + def time_CubeMetadata_values(self): + self.cube.values + + def time_DimCoordMetadata_fields(self): + self.dim.fields + + def 
time_DimCoordMetadata_values(self): + self.dim.values + diff --git a/benchmarks/benchmarks/mixin.py b/benchmarks/benchmarks/mixin.py new file mode 100644 index 0000000000..38f44175e5 --- /dev/null +++ b/benchmarks/benchmarks/mixin.py @@ -0,0 +1,79 @@ +""" +Mixin benchmark tests. + +""" + +import numpy as np + +from benchmarks import ARTIFICIAL_DIM_SIZE +from iris import coords +from iris.common.metadata import AncillaryVariableMetadata + + +LONG_NAME = "air temperature" +STANDARD_NAME = "air_temperature" +VAR_NAME = "air_temp" +UNITS = "degrees" +ATTRIBUTES = dict(a=1) +DICT = dict( + standard_name=STANDARD_NAME, + long_name=LONG_NAME, + var_name=VAR_NAME, + units=UNITS, + attributes=ATTRIBUTES, +) +METADATA = AncillaryVariableMetadata(**DICT) +TUPLE = tuple(DICT.values()) + + +class CFVariableMixin: + def setup(self): + data_1d = np.zeros(ARTIFICIAL_DIM_SIZE) + + # These benchmarks are from a user perspective, so using a user-level + # subclass of CFVariableMixin to test behaviour. AncillaryVariable is + # the simplest so using that. 
+ self.cfm_proxy = coords.AncillaryVariable(data_1d) + self.cfm_proxy.long_name = "test" + + def time_get_long_name(self): + self.cfm_proxy.long_name + + def time_set_long_name(self): + self.cfm_proxy.long_name = LONG_NAME + + def time_get_standard_name(self): + self.cfm_proxy.standard_name + + def time_set_standard_name(self): + self.cfm_proxy.standard_name = STANDARD_NAME + + def time_get_var_name(self): + self.cfm_proxy.var_name + + def time_set_var_name(self): + self.cfm_proxy.var_name = VAR_NAME + + def time_get_units(self): + self.cfm_proxy.units + + def time_set_units(self): + self.cfm_proxy.units = UNITS + + def time_get_attributes(self): + self.cfm_proxy.attributes + + def time_set_attributes(self): + self.cfm_proxy.attributes = ATTRIBUTES + + def time_get_metadata(self): + self.cfm_proxy.metadata + + def time_set_metadata__dict(self): + self.cfm_proxy.metadata = DICT + + def time_set_metadata__tuple(self): + self.cfm_proxy.metadata = TUPLE + + def time_set_metadata__metadata(self): + self.cfm_proxy.metadata = METADATA diff --git a/benchmarks/benchmarks/plot.py b/benchmarks/benchmarks/plot.py new file mode 100644 index 0000000000..3169f3cf93 --- /dev/null +++ b/benchmarks/benchmarks/plot.py @@ -0,0 +1,32 @@ +""" +Plot benchmark tests. + +""" +import matplotlib +import numpy as np + +from benchmarks import ARTIFICIAL_DIM_SIZE +from iris import coords, cube, plot + +matplotlib.use("agg") + +class AuxSort: + def setup(self): + # Manufacture data from which contours can be derived. + # Should generate 10 distinct contours, regardless of dim size. + dim_size = int(ARTIFICIAL_DIM_SIZE / 5) + repeat_number = int(dim_size / 10) + repeat_range = range(int((dim_size ** 2) / repeat_number)) + data = np.repeat(repeat_range, repeat_number) + data = data.reshape((dim_size,) * 2) + + # These benchmarks are from a user perspective, so setting up a + # user-level case that will prompt the calling of aux_coords.sort in plot.py. 
+ dim_coord = coords.DimCoord(np.arange(dim_size)) + local_cube = cube.Cube(data) + local_cube.add_aux_coord(dim_coord, 0) + self.cube = local_cube + + def time_aux_sort(self): + # Contour plot arbitrarily picked. Known to prompt aux_coords.sort. + plot.contour(self.cube) diff --git a/benchmarks/conda_lock_plugin.py b/benchmarks/conda_lock_plugin.py index 5b4ce93d32..1bba19728a 100644 --- a/benchmarks/conda_lock_plugin.py +++ b/benchmarks/conda_lock_plugin.py @@ -19,11 +19,18 @@ class CondaLock(Conda): """ tool_name = "conda-lock" + def __init__(self, conf, python, requirements): + self._lockfile_path = conf.conda_lockfile + super().__init__(conf, python, requirements) + def _uninstall_project(self): if self._get_installed_commit_hash(): # we can only run the uninstall command if an environment has already # been made before, otherwise there is no python to use to uninstall return super()._uninstall_project() + # TODO: we probably want to conda uninstall all the packages too + # something like: + # conda list | sed /^#/d | cut -f 1 -d " " | xargs conda uninstall def _setup(self): # create the shell of a conda environment, that includes no packages @@ -32,7 +39,7 @@ def _setup(self): def _build_project(self, repo, commit_hash, build_dir): # at "build" time, we build the environment from the provided lockfile - self.run_executable(_find_conda(), ["install", "-y", "-p", self._path, "--file", f"{build_dir}/requirements/ci/nox.lock/py38-linux-64.lock"]) + self.run_executable(_find_conda(), ["install", "-y", "-p", self._path, "--file", f"{build_dir}/{self._lockfile_path}"]) # this is set to warning as the asv.commands.run._do_build function # explicitly raises the log level to WARN, and I want to see the environment being updated # in the stdout log. 
From ffdeb3f94e1361fe658f894d0874766ea9b8ec4a Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 11:01:01 +0000 Subject: [PATCH 03/22] First pass at workflow file --- .github/workflows/benchmark.yml | 45 +++++++++++++++++++++++++++++++++ benchmarks/benchmarks/basic.py | 7 ----- 2 files changed, 45 insertions(+), 7 deletions(-) create mode 100644 .github/workflows/benchmark.yml delete mode 100644 benchmarks/benchmarks/basic.py diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml new file mode 100644 index 0000000000..bdccecb684 --- /dev/null +++ b/.github/workflows/benchmark.yml @@ -0,0 +1,45 @@ +# This is a basic workflow to help you get started with Actions + +name: benchmark-check + +# Controls when the workflow will run +on: + # Triggers the workflow on push or pull request events but only for the master branch + pull_request: + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + # This workflow contains a single job called "build" + build: + # The type of runner that the job will run on + runs-on: ubuntu-latest + + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - uses: actions/checkout@v2 + + - name: Setup asv + run: | + pip install asv + asv machine --yes --machine gh-actions + + - name: Run benchmarks on source and target + run: | + cd benchmarks + # run asv benchmarks on the target + asv run ${{ github.base_ref }} -s 1 -b import_iris + # run asv on the head of this PR + asv run ${{ github.head_ref }} -s 1 -b import_iris + asv publish + + - name: Archive asv results + uses: actions/upload-artifact@v2 + with: + name: asv-report + path: | + benchmarks/.asv/html + benchmarks/.asv/results \ No newline at end of file diff --git a/benchmarks/benchmarks/basic.py 
b/benchmarks/benchmarks/basic.py deleted file mode 100644 index 94dd21054c..0000000000 --- a/benchmarks/benchmarks/basic.py +++ /dev/null @@ -1,7 +0,0 @@ -def time_range(): - for i in range(1000): - pass - -def TimeRange(): - for i in range(1000): - pass \ No newline at end of file From be72cbf0866407fddff4235cf9da90032f39caa2 Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 11:03:00 +0000 Subject: [PATCH 04/22] Move to directory for benchmarks --- .github/workflows/benchmark.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index bdccecb684..bb9cd6c1da 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -25,6 +25,7 @@ jobs: - name: Setup asv run: | pip install asv + cd benchmarks asv machine --yes --machine gh-actions - name: Run benchmarks on source and target From f839910a28418ae7ff248cf1ab15330218fb8662 Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 11:06:51 +0000 Subject: [PATCH 05/22] Fetch the target branch --- .github/workflows/benchmark.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index bb9cd6c1da..40e977979e 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -32,6 +32,7 @@ jobs: run: | cd benchmarks # run asv benchmarks on the target + git fetch ${{ github.base_ref }} asv run ${{ github.base_ref }} -s 1 -b import_iris # run asv on the head of this PR asv run ${{ github.head_ref }} -s 1 -b import_iris From 20def5500b88d4c6a7b0cdfbb636e1d123f30a82 Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 11:16:52 +0000 Subject: [PATCH 06/22] specify origin --- .github/workflows/benchmark.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 40e977979e..f28c537924 100644 --- a/.github/workflows/benchmark.yml +++ 
b/.github/workflows/benchmark.yml @@ -32,7 +32,7 @@ jobs: run: | cd benchmarks # run asv benchmarks on the target - git fetch ${{ github.base_ref }} + git fetch origin ${{ github.base_ref }} asv run ${{ github.base_ref }} -s 1 -b import_iris # run asv on the head of this PR asv run ${{ github.head_ref }} -s 1 -b import_iris From bb64f9363daadfca9a0deab0410f984be46a907b Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 11:26:30 +0000 Subject: [PATCH 07/22] Different checkout strategy --- .github/workflows/benchmark.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index f28c537924..e0334c48b3 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -21,6 +21,12 @@ jobs: steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@v2 + - run: | + # fetch the target branch from origin too (just the head) + # and check it out into a local branch + git fetch --no-tags --depth=1 origin ${{ github.base_ref }} + git checkout -b ${{ github.base_ref }} + git checkout ${{ github.head_ref }} - name: Setup asv run: | @@ -32,7 +38,6 @@ jobs: run: | cd benchmarks # run asv benchmarks on the target - git fetch origin ${{ github.base_ref }} asv run ${{ github.base_ref }} -s 1 -b import_iris # run asv on the head of this PR asv run ${{ github.head_ref }} -s 1 -b import_iris From 8024bd12d82601575f8932d4a05f82f776ad820d Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 11:34:15 +0000 Subject: [PATCH 08/22] Trying shas instead --- .github/workflows/benchmark.yml | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index e0334c48b3..a333fd2794 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -21,12 +21,10 @@ jobs: steps: # Checks-out your repository under 
$GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@v2 + + # fetch the target branch from origin too (just the head) - run: | - # fetch the target branch from origin too (just the head) - # and check it out into a local branch - git fetch --no-tags --depth=1 origin ${{ github.base_ref }} - git checkout -b ${{ github.base_ref }} - git checkout ${{ github.head_ref }} + git fetch --no-tags --depth=1 origin ${{ github.event.pull_request.head.ref }} - name: Setup asv run: | @@ -38,9 +36,9 @@ jobs: run: | cd benchmarks # run asv benchmarks on the target - asv run ${{ github.base_ref }} -s 1 -b import_iris + asv run ${{ github.event.pull_request.head.ref }} -s 1 -b import_iris # run asv on the head of this PR - asv run ${{ github.head_ref }} -s 1 -b import_iris + asv run ${{ github.sha }} -s 1 -b import_iris asv publish - name: Archive asv results From a4b96ffd06fa73e4dba79887f387a70645a77602 Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 11:36:53 +0000 Subject: [PATCH 09/22] Trying branches instead... --- .github/workflows/benchmark.yml | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index a333fd2794..c62877f683 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -22,9 +22,15 @@ jobs: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@v2 - # fetch the target branch from origin too (just the head) + # This is too complicated. Just do the slow thing for now + # and checkout with depth=0. 
- run: | - git fetch --no-tags --depth=1 origin ${{ github.event.pull_request.head.ref }} + # fetch the target branch from origin too (just the head) + # and check it out into a local branch + git fetch --no-tags --depth=1 origin ${{ github.base_ref }} + git checkout -b ${{ github.base_ref }} + git fetch --no-tags --depth=1 origin ${{ github.head_ref }} + git checkout -b ${{ github.head_ref }} - name: Setup asv run: | @@ -36,9 +42,9 @@ jobs: run: | cd benchmarks # run asv benchmarks on the target - asv run ${{ github.event.pull_request.head.ref }} -s 1 -b import_iris + asv run ${{ github.base_ref }} -s 1 -b import_iris # run asv on the head of this PR - asv run ${{ github.sha }} -s 1 -b import_iris + asv run ${{ github.head_ref }} -s 1 -b import_iris asv publish - name: Archive asv results From 711d56042ccedf8e0b85c445139ebe37e7ec251c Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 11:50:15 +0000 Subject: [PATCH 10/22] No publish, just compare --- .github/workflows/benchmark.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index c62877f683..3c6ae94a29 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -22,11 +22,9 @@ jobs: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@v2 - # This is too complicated. Just do the slow thing for now - # and checkout with depth=0. 
+ # fetch the target branch from origin too (just the head) + # and check it out into a local branch - run: | - # fetch the target branch from origin too (just the head) - # and check it out into a local branch git fetch --no-tags --depth=1 origin ${{ github.base_ref }} git checkout -b ${{ github.base_ref }} git fetch --no-tags --depth=1 origin ${{ github.head_ref }} @@ -45,12 +43,12 @@ jobs: asv run ${{ github.base_ref }} -s 1 -b import_iris # run asv on the head of this PR asv run ${{ github.head_ref }} -s 1 -b import_iris - asv publish + asv compare ${{ github.base_ref }} ${{ github.head_ref }} > .asv/compare.log + asv compare --only-changed ${{ github.base_ref }} ${{ github.head_ref }} - name: Archive asv results uses: actions/upload-artifact@v2 with: name: asv-report path: | - benchmarks/.asv/html benchmarks/.asv/results \ No newline at end of file From 83cf20d163503d1238e7082e7435d96d370ce23b Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 11:57:29 +0000 Subject: [PATCH 11/22] Doc change in benchmark yml --- .github/workflows/benchmark.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 3c6ae94a29..260ef8d997 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -24,7 +24,8 @@ jobs: # fetch the target branch from origin too (just the head) # and check it out into a local branch - - run: | + - name: Checkout PR and target branch + run: | git fetch --no-tags --depth=1 origin ${{ github.base_ref }} git checkout -b ${{ github.base_ref }} git fetch --no-tags --depth=1 origin ${{ github.head_ref }} From cd929b7c3b7c66a751b1725da0d3218b70bc8516 Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 12:56:41 +0000 Subject: [PATCH 12/22] Simpler CondaLock environment --- benchmarks/conda_lock_plugin.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/benchmarks/conda_lock_plugin.py 
b/benchmarks/conda_lock_plugin.py index 1bba19728a..1107873c3e 100644 --- a/benchmarks/conda_lock_plugin.py +++ b/benchmarks/conda_lock_plugin.py @@ -27,10 +27,10 @@ def _uninstall_project(self): if self._get_installed_commit_hash(): # we can only run the uninstall command if an environment has already # been made before, otherwise there is no python to use to uninstall - return super()._uninstall_project() + super()._uninstall_project() # TODO: we probably want to conda uninstall all the packages too # something like: - # conda list | sed /^#/d | cut -f 1 -d " " | xargs conda uninstall + # conda list --no-pip | sed /^#/d | cut -f 1 -d " " | xargs conda uninstall def _setup(self): # create the shell of a conda environment, that includes no packages @@ -45,9 +45,4 @@ def _build_project(self, repo, commit_hash, build_dir): # in the stdout log. log.warning(f"Environment {self.name} updated to spec at {commit_hash[:8]}") log.debug(self.run_executable(_find_conda(), ["list", "-p", self._path])) - # self._build_command = "" - # return super()._build_project(repo, commit_hash, build_dir) - - def _install_project(self, repo, commit_hash, build_dir): - self._install_command = "pip install --no-deps --editable {build_dir}" - return super()._install_project(repo, commit_hash, build_dir) \ No newline at end of file + return super()._build_project(repo, commit_hash, build_dir) \ No newline at end of file From f3a34994c4f881110390cda260d24147ac6f24f4 Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 12:57:25 +0000 Subject: [PATCH 13/22] Simpler checkout in workflow --- .github/workflows/benchmark.yml | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 260ef8d997..0baaaa25d4 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -21,15 +21,17 @@ jobs: steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can 
access it - uses: actions/checkout@v2 + with: + depth: 0 - # fetch the target branch from origin too (just the head) - # and check it out into a local branch - - name: Checkout PR and target branch - run: | - git fetch --no-tags --depth=1 origin ${{ github.base_ref }} - git checkout -b ${{ github.base_ref }} - git fetch --no-tags --depth=1 origin ${{ github.head_ref }} - git checkout -b ${{ github.head_ref }} + # This is too complicated to get working for now. + # + # # fetch the target branch from origin too (just the head) + # # and check it out into a local branch + # - name: Checkout PR and target branch + # run: | + # git fetch --no-tags --depth=1 origin ${{ github.base_ref }} + # git fetch --no-tags --depth=1 origin ${{ github.head_ref }} - name: Setup asv run: | @@ -52,4 +54,5 @@ jobs: with: name: asv-report path: | - benchmarks/.asv/results \ No newline at end of file + benchmarks/.asv/results + benchmarks/.asv/ \ No newline at end of file From e6319f5a526682e48891b1cfe52605699c3e466c Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 12:59:07 +0000 Subject: [PATCH 14/22] Simpler checkout in workflow --- .github/workflows/benchmark.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 0baaaa25d4..d3e916a6ce 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -22,7 +22,7 @@ jobs: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@v2 with: - depth: 0 + fetch-depth: 0 # This is too complicated to get working for now. 
# From 30c2442af63b858295be6b05b0acd7cf6d495c62 Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 13:07:18 +0000 Subject: [PATCH 15/22] Still fussing about with commit ids --- .github/workflows/benchmark.yml | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index d3e916a6ce..b5ee556ba8 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -43,11 +43,13 @@ jobs: run: | cd benchmarks # run asv benchmarks on the target - asv run ${{ github.base_ref }} -s 1 -b import_iris + export BASE=origin/${{ github.base_ref }} + export HEAD=origin/${{ github.head_ref }} + asv run $BASE -s 1 -b import_iris # run asv on the head of this PR - asv run ${{ github.head_ref }} -s 1 -b import_iris - asv compare ${{ github.base_ref }} ${{ github.head_ref }} > .asv/compare.log - asv compare --only-changed ${{ github.base_ref }} ${{ github.head_ref }} + asv run $HEAD -s 1 -b import_iris + asv compare $BASE $HEAD > .asv/compare.log + asv compare --only-changed $BASE $HEAD - name: Archive asv results uses: actions/upload-artifact@v2 @@ -55,4 +57,4 @@ jobs: name: asv-report path: | benchmarks/.asv/results - benchmarks/.asv/ \ No newline at end of file + benchmarks/.asv/compare.log \ No newline at end of file From 605cc83f6fe1d57cdf1caa6c55e5dd37e6e257c5 Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 13:23:42 +0000 Subject: [PATCH 16/22] Set the action to fail on performance degradation --- .github/workflows/benchmark.yml | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index b5ee556ba8..ff93a11b8d 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -45,9 +45,9 @@ jobs: # run asv benchmarks on the target export BASE=origin/${{ github.base_ref }} export HEAD=origin/${{ github.head_ref }} - asv run $BASE 
-s 1 -b import_iris + asv run $BASE -s 1 # run asv on the head of this PR - asv run $HEAD -s 1 -b import_iris + asv run $HEAD -s 1 asv compare $BASE $HEAD > .asv/compare.log asv compare --only-changed $BASE $HEAD @@ -57,4 +57,16 @@ jobs: name: asv-report path: | benchmarks/.asv/results - benchmarks/.asv/compare.log \ No newline at end of file + benchmarks/.asv/compare.log + + - name: Fail if changes + run: | + export BASE=origin/${{ github.base_ref }} + export HEAD=origin/${{ github.head_ref }} + asv compare -s $BASE $HEAD > compare.txt + cat compare.txt + if grep -q "Benchmarks that have got worse" compare.txt; then + echo "Performance degradation. See action artifact for full results." + exit 1 + fi + From d40b06b90c6b2b64f70c3f8835f11dadc3f77171 Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 14:10:03 +0000 Subject: [PATCH 17/22] Improving workflow error messages --- .github/workflows/benchmark.yml | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index ff93a11b8d..ee7cc8827f 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -42,14 +42,14 @@ jobs: - name: Run benchmarks on source and target run: | cd benchmarks - # run asv benchmarks on the target export BASE=origin/${{ github.base_ref }} export HEAD=origin/${{ github.head_ref }} - asv run $BASE -s 1 + # run asv benchmarks on the target + # -s 1 ensures only one "step" (the tip of the branch) is run + asv run $BASE -s 1 -b cube.Cube # run asv on the head of this PR - asv run $HEAD -s 1 + asv run $HEAD -s 1 -b cube.Cube + asv compare -s $BASE $HEAD > .asv/compare.txt - name: Archive asv results uses: actions/upload-artifact@v2 @@ -57,16 +57,14 @@ jobs: name: asv-report path: | benchmarks/.asv/results - benchmarks/.asv/compare.log + benchmarks/.asv/compare.txt - - 
name: Fail if changes + - name: Fail if performance degraded run: | - export BASE=origin/${{ github.base_ref }} - export HEAD=origin/${{ github.head_ref }} - asv compare -s $BASE $HEAD > compare.txt - cat compare.txt - if grep -q "Benchmarks that have got worse" compare.txt; then - echo "Performance degradation. See action artifact for full results." + cd benchmarks + cat .asv/compare.txt + if grep -q "Benchmarks that have got worse" .asv/compare.txt; then + echo "::error Performance degradation. See action artifact for full results." exit 1 fi From 000acbe4f098045d69c7237269fec8a7c168d24c Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 14:16:01 +0000 Subject: [PATCH 18/22] Run all benchmarks --- .github/workflows/benchmark.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index ee7cc8827f..8a0a42f747 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -46,9 +46,9 @@ jobs: export HEAD=origin/${{ github.head_ref }} # run asv benchmarks on the target # -s 1 ensures only one "step" (the tip of the branch) is run - asv run $BASE -s 1 -b cube.Cube + asv run $BASE -s 1 # run asv on the head of this PR - asv run $HEAD -s 1 -b cube.Cube + asv run $HEAD -s 1 asv compare -s $BASE $HEAD > .asv/compare.txt - name: Archive asv results @@ -64,7 +64,7 @@ jobs: cd benchmarks cat .asv/compare.txt if grep -q "Benchmarks that have got worse" .asv/compare.txt; then - echo "::error Performance degradation. See action artifact for full results." + echo "::error::Performance degradation. See action artifact for full results." 
exit 1 fi From 26bd5fb62add8765a1c5567c7c1e4dad6fd275ce Mon Sep 17 00:00:00 2001 From: James Penn Date: Thu, 22 Jul 2021 15:44:38 +0100 Subject: [PATCH 19/22] Update cube.py --- lib/iris/cube.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/iris/cube.py b/lib/iris/cube.py index a15951900b..0b624e8308 100644 --- a/lib/iris/cube.py +++ b/lib/iris/cube.py @@ -888,6 +888,10 @@ def __init__( ... (longitude, 1)]) """ + # an ill advised change... + import time + time.sleep(0.2) + # Temporary error while we transition the API. if isinstance(data, str): raise TypeError("Invalid data type: {!r}.".format(data)) From d43d6bee5854deb3fe0ab5e60d1713d14a50e102 Mon Sep 17 00:00:00 2001 From: James Penn Date: Fri, 23 Jul 2021 10:44:03 +0100 Subject: [PATCH 20/22] Update cube.py --- lib/iris/cube.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/iris/cube.py b/lib/iris/cube.py index 0b624e8308..cebf0a2cd9 100644 --- a/lib/iris/cube.py +++ b/lib/iris/cube.py @@ -890,7 +890,7 @@ def __init__( """ # an ill advised change... import time - time.sleep(0.2) + time.sleep(0.25) # Temporary error while we transition the API. if isinstance(data, str): From 12809a728b04cf203691f33782af76ec055766c2 Mon Sep 17 00:00:00 2001 From: James Penn Date: Fri, 23 Jul 2021 10:58:12 +0100 Subject: [PATCH 21/22] Update cube.py --- lib/iris/cube.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/iris/cube.py b/lib/iris/cube.py index cebf0a2cd9..0b624e8308 100644 --- a/lib/iris/cube.py +++ b/lib/iris/cube.py @@ -890,7 +890,7 @@ def __init__( """ # an ill advised change... import time - time.sleep(0.25) + time.sleep(0.2) # Temporary error while we transition the API. 
if isinstance(data, str): From 4c3fe80401089fb30721d3cbdf98f235ed32418f Mon Sep 17 00:00:00 2001 From: James Penn Date: Fri, 23 Jul 2021 11:16:17 +0100 Subject: [PATCH 22/22] Fixed the regression --- lib/iris/cube.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/iris/cube.py b/lib/iris/cube.py index 0b624e8308..724b251620 100644 --- a/lib/iris/cube.py +++ b/lib/iris/cube.py @@ -888,9 +888,7 @@ def __init__( ... (longitude, 1)]) """ - # an ill advised change... - import time - time.sleep(0.2) + # removed the sleep # Temporary error while we transition the API. if isinstance(data, str):