diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 97dff666cf..228970bee2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: - id: no-commit-to-branch - repo: https://github.com/psf/black - rev: 21.12b0 + rev: 22.1.0 hooks: - id: black pass_filenames: false @@ -50,7 +50,7 @@ repos: args: [--filter-files] - repo: https://github.com/asottile/blacken-docs - rev: v1.12.0 + rev: v1.12.1 hooks: - id: blacken-docs types: [file, rst] diff --git a/benchmarks/benchmarks/plot.py b/benchmarks/benchmarks/plot.py index 45905abd2f..24899776dc 100644 --- a/benchmarks/benchmarks/plot.py +++ b/benchmarks/benchmarks/plot.py @@ -22,7 +22,7 @@ def setup(self): # Should generate 10 distinct contours, regardless of dim size. dim_size = int(ARTIFICIAL_DIM_SIZE / 5) repeat_number = int(dim_size / 10) - repeat_range = range(int((dim_size ** 2) / repeat_number)) + repeat_range = range(int((dim_size**2) / repeat_number)) data = np.repeat(repeat_range, repeat_number) data = data.reshape((dim_size,) * 2) diff --git a/docs/gallery_code/meteorology/plot_wind_barbs.py b/docs/gallery_code/meteorology/plot_wind_barbs.py index c3c056eb4a..b09040c64e 100644 --- a/docs/gallery_code/meteorology/plot_wind_barbs.py +++ b/docs/gallery_code/meteorology/plot_wind_barbs.py @@ -30,7 +30,7 @@ def main(): # To illustrate the full range of barbs, scale the wind speed up to pretend # that a storm is passing over - magnitude = (uwind ** 2 + vwind ** 2) ** 0.5 + magnitude = (uwind**2 + vwind**2) ** 0.5 magnitude.convert_units("knot") max_speed = magnitude.collapsed( ("latitude", "longitude"), iris.analysis.MAX @@ -41,7 +41,7 @@ def main(): vwind = vwind / max_speed * max_desired # Create a cube containing the wind speed - windspeed = (uwind ** 2 + vwind ** 2) ** 0.5 + windspeed = (uwind**2 + vwind**2) ** 0.5 windspeed.rename("windspeed") windspeed.convert_units("knot") diff --git a/docs/gallery_code/meteorology/plot_wind_speed.py b/docs/gallery_code/meteorology/plot_wind_speed.py index fd03f54205..40d9d0da00 100644 --- a/docs/gallery_code/meteorology/plot_wind_speed.py +++ b/docs/gallery_code/meteorology/plot_wind_speed.py @@ -27,7 +27,7 @@ def main(): vwind = iris.load_cube(infile, "y_wind") # Create a cube containing the wind speed. - windspeed = (uwind ** 2 + vwind ** 2) ** 0.5 + windspeed = (uwind**2 + vwind**2) ** 0.5 windspeed.rename("windspeed") # Plot the wind speed as a contour plot. diff --git a/docs/src/whatsnew/dev.rst b/docs/src/whatsnew/3.2.rst similarity index 98% rename from docs/src/whatsnew/dev.rst rename to docs/src/whatsnew/3.2.rst index e2d4c2bc0b..c78e1283d6 100644 --- a/docs/src/whatsnew/dev.rst +++ b/docs/src/whatsnew/3.2.rst @@ -1,13 +1,13 @@ .. include:: ../common_links.inc -|iris_version| |build_date| [unreleased] -**************************************** +v3.2 (31 Jan 2022) [unreleased] +******************************* This document explains the changes made to Iris for this release (:doc:`View all changes `.) -.. dropdown:: :opticon:`report` |iris_version| Release Highlights +.. dropdown:: :opticon:`report` v3.2.0 Release Highlights :container: + shadow :title: text-primary text-center font-weight-bold :body: bg-light @@ -18,8 +18,7 @@ This document explains the changes made to Iris for this release * We've added experimental support for :ref:`Meshes `, which can now be loaded and - attached to a cube. Mesh support is based on the based on `CF-UGRID`_ - model. + attached to a cube. Mesh support is based on the `CF-UGRID`_ model. 
* We've also dropped support for ``Python 3.7``. And finally, get in touch with us on :issue:`GitHub` if you have diff --git a/docs/src/whatsnew/dev.rst.template b/docs/src/whatsnew/dev.rst.template deleted file mode 100644 index 79c578ca65..0000000000 --- a/docs/src/whatsnew/dev.rst.template +++ /dev/null @@ -1,112 +0,0 @@ -.. include:: ../common_links.inc - -|iris_version| |build_date| [unreleased] -**************************************** - -This document explains the changes made to Iris for this release -(:doc:`View all changes `.) - - -.. dropdown:: :opticon:`report` |iris_version| Release Highlights - :container: + shadow - :title: text-primary text-center font-weight-bold - :body: bg-light - :animate: fade-in - :open: - - The highlights for this major/minor release of Iris include: - - * N/A - - And finally, get in touch with us on :issue:`GitHub` if you have - any issues or feature requests for improving Iris. Enjoy! - - -NOTE: section below is a template for bugfix patches -==================================================== - (Please remove this section when creating an initial 'latest.rst') - -v3.X.X (DD MMM YYYY) -==================== - -.. dropdown:: :opticon:`alert` v3.X.X Patches - :container: + shadow - :title: text-primary text-center font-weight-bold - :body: bg-light - :animate: fade-in - - The patches in this release of Iris include: - - #. N/A - -NOTE: section above is a template for bugfix patches -==================================================== - (Please remove this section when creating an initial 'latest.rst') - - - -📢 Announcements -================ - -#. N/A - - -✨ Features -=========== - -#. N/A - - -🐛 Bugs Fixed -============= - -#. N/A - - -💣 Incompatible Changes -======================= - -#. N/A - - -🚀 Performance Enhancements -=========================== - -#. N/A - - -🔥 Deprecations -=============== - -#. N/A - - -🔗 Dependencies -=============== - -#. N/A - - -📚 Documentation -================ - -#. N/A - - -💼 Internal -=========== - -#. N/A - - -.. comment - Whatsnew author names (@github name) in alphabetical order. Note that, - core dev names are automatically included by the common_links.inc: - - - - -.. comment - Whatsnew resources in alphabetical order: - - diff --git a/docs/src/whatsnew/index.rst b/docs/src/whatsnew/index.rst index 51f03e8d8f..f425e649b9 100644 --- a/docs/src/whatsnew/index.rst +++ b/docs/src/whatsnew/index.rst @@ -10,7 +10,7 @@ Iris versions. .. toctree:: :maxdepth: 1 - dev.rst + 3.2.rst 3.1.rst 3.0.rst 2.4.rst diff --git a/docs/src/whatsnew/latest.rst b/docs/src/whatsnew/latest.rst index 56aebe92dd..2bdbea5d85 120000 --- a/docs/src/whatsnew/latest.rst +++ b/docs/src/whatsnew/latest.rst @@ -1 +1 @@ -dev.rst \ No newline at end of file +3.2.rst \ No newline at end of file diff --git a/lib/iris/__init__.py b/lib/iris/__init__.py index 26f03c0566..aca4e77e88 100644 --- a/lib/iris/__init__.py +++ b/lib/iris/__init__.py @@ -104,7 +104,7 @@ def callback(cube, field, filename): # Iris revision. -__version__ = "3.2.dev0" +__version__ = "3.2.0rc0" # Restrict the names imported when using "from iris import *" __all__ = [ diff --git a/lib/iris/analysis/__init__.py b/lib/iris/analysis/__init__.py index 465a521065..b1a9e1d259 100644 --- a/lib/iris/analysis/__init__.py +++ b/lib/iris/analysis/__init__.py @@ -1394,7 +1394,7 @@ def _lazy_rms(array, axis, **kwargs): # all. 
Thus trying to use this aggregator with weights will currently # raise an error in dask due to the unexpected keyword `weights`, # rather than silently returning the wrong answer. - return da.sqrt(da.mean(array ** 2, axis=axis, **kwargs)) + return da.sqrt(da.mean(array**2, axis=axis, **kwargs)) @_build_dask_mdtol_function diff --git a/lib/iris/analysis/_grid_angles.py b/lib/iris/analysis/_grid_angles.py index 127aec7c1e..0b52f54568 100644 --- a/lib/iris/analysis/_grid_angles.py +++ b/lib/iris/analysis/_grid_angles.py @@ -120,7 +120,7 @@ def _angle(p, q, r): mid_lons = np.deg2rad(q[0]) pr = _3d_xyz_from_latlon(r[0], r[1]) - _3d_xyz_from_latlon(p[0], p[1]) - pr_norm = np.sqrt(np.sum(pr ** 2, axis=0)) + pr_norm = np.sqrt(np.sum(pr**2, axis=0)) pr_top = pr[1] * np.cos(mid_lons) - pr[0] * np.sin(mid_lons) index = pr_norm == 0 diff --git a/lib/iris/analysis/_scipy_interpolate.py b/lib/iris/analysis/_scipy_interpolate.py index c6b33c56a4..fc64249729 100644 --- a/lib/iris/analysis/_scipy_interpolate.py +++ b/lib/iris/analysis/_scipy_interpolate.py @@ -229,7 +229,7 @@ def compute_interp_weights(self, xi, method=None): xi_shape, method, indices, norm_distances, out_of_bounds = prepared # Allocate arrays for describing the sparse matrix. - n_src_values_per_result_value = 2 ** ndim + n_src_values_per_result_value = 2**ndim n_result_values = len(indices[0]) n_non_zero = n_result_values * n_src_values_per_result_value weights = np.ones(n_non_zero, dtype=norm_distances[0].dtype) diff --git a/lib/iris/analysis/calculus.py b/lib/iris/analysis/calculus.py index 409782f256..4630f47967 100644 --- a/lib/iris/analysis/calculus.py +++ b/lib/iris/analysis/calculus.py @@ -629,14 +629,10 @@ def curl(i_cube, j_cube, k_cube=None): # (d/dtheta (i_cube * sin(lat)) - d_j_cube_dphi) # phi_cmpt = 1/r * ( d/dr (r * j_cube) - d_k_cube_dtheta) # theta_cmpt = 1/r * ( 1/cos(lat) * d_k_cube_dphi - d/dr (r * i_cube) - if ( - y_coord.name() - not in [ - "latitude", - "grid_latitude", - ] - or x_coord.name() not in ["longitude", "grid_longitude"] - ): + if y_coord.name() not in [ + "latitude", + "grid_latitude", + ] or x_coord.name() not in ["longitude", "grid_longitude"]: raise ValueError( "Expecting latitude as the y coord and " "longitude as the x coord for spherical curl." diff --git a/lib/iris/analysis/cartography.py b/lib/iris/analysis/cartography.py index 373487af53..f704468e33 100644 --- a/lib/iris/analysis/cartography.py +++ b/lib/iris/analysis/cartography.py @@ -335,7 +335,7 @@ def _quadrant_area(radian_lat_bounds, radian_lon_bounds, radius_of_earth): raise ValueError("Bounds must be [n,2] array") # fill in a new array of areas - radius_sqr = radius_of_earth ** 2 + radius_sqr = radius_of_earth**2 radian_lat_64 = radian_lat_bounds.astype(np.float64) radian_lon_64 = radian_lon_bounds.astype(np.float64) @@ -1010,8 +1010,8 @@ def _transform_distance_vectors_tolerance_mask( # Squared magnitudes should be equal to one within acceptable tolerance. # A value of atol=2e-3 is used, which corresponds to a change in magnitude # of approximately 0.1%. 
- sqmag_1_0 = u_one_t ** 2 + v_zero_t ** 2 - sqmag_0_1 = u_zero_t ** 2 + v_one_t ** 2 + sqmag_1_0 = u_one_t**2 + v_zero_t**2 + sqmag_0_1 = u_zero_t**2 + v_one_t**2 mask = np.logical_not( np.logical_and( np.isclose(sqmag_1_0, ones, atol=2e-3), diff --git a/lib/iris/analysis/maths.py b/lib/iris/analysis/maths.py index 107d964ed4..1cbc90cc60 100644 --- a/lib/iris/analysis/maths.py +++ b/lib/iris/analysis/maths.py @@ -540,7 +540,7 @@ def power(data, out=None): return _math_op_common( cube, power, - cube.units ** exponent, + cube.units**exponent, new_dtype=new_dtype, in_place=in_place, ) diff --git a/lib/iris/analysis/stats.py b/lib/iris/analysis/stats.py index 89dde1818b..711e3c5bfb 100644 --- a/lib/iris/analysis/stats.py +++ b/lib/iris/analysis/stats.py @@ -168,10 +168,10 @@ def _ones_like(cube): covar = (s1 * s2).collapsed( corr_coords, iris.analysis.SUM, weights=weights_1, mdtol=mdtol ) - var_1 = (s1 ** 2).collapsed( + var_1 = (s1**2).collapsed( corr_coords, iris.analysis.SUM, weights=weights_1 ) - var_2 = (s2 ** 2).collapsed( + var_2 = (s2**2).collapsed( corr_coords, iris.analysis.SUM, weights=weights_2 ) diff --git a/lib/iris/fileformats/netcdf.py b/lib/iris/fileformats/netcdf.py index 100ab29daa..73a137b4af 100644 --- a/lib/iris/fileformats/netcdf.py +++ b/lib/iris/fileformats/netcdf.py @@ -2738,9 +2738,9 @@ def _create_cf_data_variable( cmin, cmax = _co_realise_lazy_arrays([cmin, cmax]) n = dtype.itemsize * 8 if masked: - scale_factor = (cmax - cmin) / (2 ** n - 2) + scale_factor = (cmax - cmin) / (2**n - 2) else: - scale_factor = (cmax - cmin) / (2 ** n - 1) + scale_factor = (cmax - cmin) / (2**n - 1) if dtype.kind == "u": add_offset = cmin elif dtype.kind == "i": diff --git a/lib/iris/fileformats/pp.py b/lib/iris/fileformats/pp.py index 9f213ec4db..9bda98bf61 100644 --- a/lib/iris/fileformats/pp.py +++ b/lib/iris/fileformats/pp.py @@ -403,7 +403,7 @@ def _calculate_str_value_from_value(self): def _calculate_value_from_str_value(self): self._value = np.sum( - [10 ** i * val for i, val in enumerate(self._strvalue)] + [10**i * val for i, val in enumerate(self._strvalue)] ) def __len__(self): @@ -418,7 +418,7 @@ def __getitem__(self, key): # if the key returns a list of values, then combine them together # to an integer if isinstance(val, list): - val = sum([10 ** i * val for i, val in enumerate(val)]) + val = sum([10**i * val for i, val in enumerate(val)]) return val diff --git a/lib/iris/tests/integration/test_netcdf.py b/lib/iris/tests/integration/test_netcdf.py index f7aaa1d05c..2a45561e17 100644 --- a/lib/iris/tests/integration/test_netcdf.py +++ b/lib/iris/tests/integration/test_netcdf.py @@ -416,7 +416,7 @@ def setUp(self): levels.units = "centimeters" levels.positive = "down" levels.axis = "Z" - levels[:] = np.linspace(0, 10 ** 5, 3) + levels[:] = np.linspace(0, 10**5, 3) volcello.id = "volcello" volcello.out_name = "volcello" @@ -507,9 +507,9 @@ def _get_scale_factor_add_offset(cube, datatype): else: masked = False if masked: - scale_factor = (cmax - cmin) / (2 ** n - 2) + scale_factor = (cmax - cmin) / (2**n - 2) else: - scale_factor = (cmax - cmin) / (2 ** n - 1) + scale_factor = (cmax - cmin) / (2**n - 1) if dt.kind == "u": add_offset = cmin elif dt.kind == "i": diff --git a/lib/iris/tests/test_basic_maths.py b/lib/iris/tests/test_basic_maths.py index e753adbae8..24f2b89442 100644 --- a/lib/iris/tests/test_basic_maths.py +++ b/lib/iris/tests/test_basic_maths.py @@ -249,7 +249,7 @@ def test_apply_ufunc(self): np.square, a, new_name="squared temperature", - new_unit=a.units ** 
2, + new_unit=a.units**2, in_place=False, ) self.assertCMLApproxData(a, ("analysis", "apply_ufunc_original.cml")) @@ -259,14 +259,14 @@ def test_apply_ufunc(self): np.square, a, new_name="squared temperature", - new_unit=a.units ** 2, + new_unit=a.units**2, in_place=True, ) self.assertCMLApproxData(b, ("analysis", "apply_ufunc.cml")) self.assertCMLApproxData(a, ("analysis", "apply_ufunc.cml")) def vec_mag(u, v): - return math.sqrt(u ** 2 + v ** 2) + return math.sqrt(u**2 + v**2) c = a.copy() + 2 @@ -295,7 +295,7 @@ def test_apply_ufunc_fail(self): def test_ifunc(self): a = self.cube - my_ifunc = iris.analysis.maths.IFunc(np.square, lambda a: a.units ** 2) + my_ifunc = iris.analysis.maths.IFunc(np.square, lambda a: a.units**2) b = my_ifunc(a, new_name="squared temperature", in_place=False) self.assertCMLApproxData(a, ("analysis", "apply_ifunc_original.cml")) @@ -307,7 +307,7 @@ def test_ifunc(self): self.assertCMLApproxData(a, ("analysis", "apply_ifunc.cml")) def vec_mag(u, v): - return math.sqrt(u ** 2 + v ** 2) + return math.sqrt(u**2 + v**2) c = a.copy() + 2 @@ -347,7 +347,7 @@ def test_ifunc_init_fail(self): def test_ifunc_call_fail(self): a = self.cube - my_ifunc = iris.analysis.maths.IFunc(np.square, lambda a: a.units ** 2) + my_ifunc = iris.analysis.maths.IFunc(np.square, lambda a: a.units**2) # should now NOT fail because giving 2 arguments to an ifunc that # expects only one will now ignore the surplus argument and raise @@ -367,7 +367,7 @@ def test_ifunc_call_fail(self): my_ifunc(a) my_ifunc = iris.analysis.maths.IFunc( - lambda a: (a, a ** 2.0), lambda cube: cf_units.Unit("1") + lambda a: (a, a**2.0), lambda cube: cf_units.Unit("1") ) # should fail because data function returns a tuple @@ -553,9 +553,9 @@ def test_square_root(self): a.data = abs(a.data) a.units **= 2 - e = a ** 0.5 + e = a**0.5 - self.assertArrayAllClose(e.data, a.data ** 0.5) + self.assertArrayAllClose(e.data, a.data**0.5) self.assertCML(e, ("analysis", "sqrt.cml"), checksum=False) self.assertRaises(ValueError, iris.analysis.maths.exponentiate, a, 0.3) @@ -585,26 +585,26 @@ def test_apply_ufunc(self): np.square, a, new_name="more_thingness", - new_unit=a.units ** 2, + new_unit=a.units**2, in_place=False, ) - ans = a.data ** 2 + ans = a.data**2 self.assertArrayEqual(b.data, ans) self.assertEqual(b.name(), "more_thingness") self.assertEqual(b.units, cf_units.Unit("m^2")) def vec_mag(u, v): - return math.sqrt(u ** 2 + v ** 2) + return math.sqrt(u**2 + v**2) c = a.copy() + 2 vec_mag_ufunc = np.frompyfunc(vec_mag, 2, 1) b = iris.analysis.maths.apply_ufunc(vec_mag_ufunc, a, c) - ans = a.data ** 2 + c.data ** 2 - b2 = b ** 2 + ans = a.data**2 + c.data**2 + b2 = b**2 self.assertArrayAlmostEqual(b2.data, ans) @@ -617,17 +617,17 @@ def test_ifunc(self): a = self.cube a.units = cf_units.Unit("meters") - my_ifunc = iris.analysis.maths.IFunc(np.square, lambda x: x.units ** 2) + my_ifunc = iris.analysis.maths.IFunc(np.square, lambda x: x.units**2) b = my_ifunc(a, new_name="more_thingness", in_place=False) - ans = a.data ** 2 + ans = a.data**2 self.assertArrayEqual(b.data, ans) self.assertEqual(b.name(), "more_thingness") self.assertEqual(b.units, cf_units.Unit("m^2")) def vec_mag(u, v): - return math.sqrt(u ** 2 + v ** 2) + return math.sqrt(u**2 + v**2) c = a.copy() + 2 @@ -637,12 +637,12 @@ def vec_mag(u, v): ) b = my_ifunc(a, c) - ans = (a.data ** 2 + c.data ** 2) ** 0.5 + ans = (a.data**2 + c.data**2) ** 0.5 self.assertArrayAlmostEqual(b.data, ans) def vec_mag_data_func(u_data, v_data): - return np.sqrt(u_data ** 2 + 
v_data ** 2) + return np.sqrt(u_data**2 + v_data**2) vec_mag_ifunc = iris.analysis.maths.IFunc( vec_mag_data_func, lambda a, b: (a + b).units diff --git a/lib/iris/tests/unit/analysis/cartography/test_rotate_winds.py b/lib/iris/tests/unit/analysis/cartography/test_rotate_winds.py index 9e3af90603..eafaa20ec8 100644 --- a/lib/iris/tests/unit/analysis/cartography/test_rotate_winds.py +++ b/lib/iris/tests/unit/analysis/cartography/test_rotate_winds.py @@ -343,8 +343,8 @@ def test_orig_coords(self): def test_magnitude_preservation(self): u, v = self._uv_cubes_limited_extent() ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB()) - orig_sq_mag = u.data ** 2 + v.data ** 2 - res_sq_mag = ut.data ** 2 + vt.data ** 2 + orig_sq_mag = u.data**2 + v.data**2 + res_sq_mag = ut.data**2 + vt.data**2 self.assertArrayAllClose(orig_sq_mag, res_sq_mag, rtol=5e-4) def test_data_values(self): @@ -437,9 +437,9 @@ def test_rotated_to_osgb(self): self.assertArrayEqual(expected_mask, vt.data.mask) # Check unmasked values have sufficiently small error in mag. - expected_mag = np.sqrt(u.data ** 2 + v.data ** 2) + expected_mag = np.sqrt(u.data**2 + v.data**2) # Use underlying data to ignore mask in calculation. - res_mag = np.sqrt(ut.data.data ** 2 + vt.data.data ** 2) + res_mag = np.sqrt(ut.data.data**2 + vt.data.data**2) # Calculate percentage error (note there are no zero magnitudes # so we can divide safely). anom = 100.0 * np.abs(res_mag - expected_mag) / expected_mag diff --git a/lib/iris/tests/unit/analysis/regrid/test_RectilinearRegridder.py b/lib/iris/tests/unit/analysis/regrid/test_RectilinearRegridder.py index f0dba83748..a018507fb3 100644 --- a/lib/iris/tests/unit/analysis/regrid/test_RectilinearRegridder.py +++ b/lib/iris/tests/unit/analysis/regrid/test_RectilinearRegridder.py @@ -33,7 +33,7 @@ def setUp(self): self.xs, self.ys = np.meshgrid(self.x.points, self.y.points) def transformation(x, y): - return x + y ** 2 + return x + y**2 # Construct a function which adds dimensions to the 2D data array # so that we can test higher dimensional functionality. diff --git a/lib/iris/tests/unit/coord_categorisation/test_add_categorised_coord.py b/lib/iris/tests/unit/coord_categorisation/test_add_categorised_coord.py index b7c59ff566..0c20f16f5a 100644 --- a/lib/iris/tests/unit/coord_categorisation/test_add_categorised_coord.py +++ b/lib/iris/tests/unit/coord_categorisation/test_add_categorised_coord.py @@ -36,7 +36,7 @@ def test_vectorise_call(self): # The reason we use numpy.vectorize is to support multi-dimensional # coordinate points. def fn(coord, v): - return v ** 2 + return v**2 with mock.patch( "numpy.vectorize", return_value=self.vectorised diff --git a/lib/iris/tests/unit/fileformats/pp_load_rules/test__collapse_degenerate_points_and_bounds.py b/lib/iris/tests/unit/fileformats/pp_load_rules/test__collapse_degenerate_points_and_bounds.py index 0f2a8a2d4b..c9c4821e0a 100644 --- a/lib/iris/tests/unit/fileformats/pp_load_rules/test__collapse_degenerate_points_and_bounds.py +++ b/lib/iris/tests/unit/fileformats/pp_load_rules/test__collapse_degenerate_points_and_bounds.py @@ -65,7 +65,7 @@ def test_3d(self): def test_multiple_odd_dims(self): # Test to ensure multiple collapsed dimensions don't interfere. # make a 5-D array where dimensions 0, 2 and 3 are degenerate. - array = np.arange(3 ** 5).reshape([3] * 5) + array = np.arange(3**5).reshape([3] * 5) array[1:] = array[0:1] array[:, :, 1:] = array[:, :, 0:1] array[:, :, :, 1:] = array[:, :, :, 0:1]
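
Most of the Python churn in this patch is Black 22.1.0 hugging the power operator when both operands are simple (e.g. uwind**2), while keeping the spaces when an operand is a compound expression (e.g. (uwind**2 + vwind**2) ** 0.5). A minimal sketch (not part of the patch) confirming the rewrite is behaviour-neutral; the old/new strings are copied from plot_wind_speed.py above:

import ast

# Whitespace around "**" does not survive parsing, so the 21.12b0 and
# 22.1.0 spellings produce identical ASTs and identical bytecode.
old = "windspeed = (uwind ** 2 + vwind ** 2) ** 0.5"
new = "windspeed = (uwind**2 + vwind**2) ** 0.5"

assert ast.dump(ast.parse(old)) == ast.dump(ast.parse(new))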
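
For the scale_factor arithmetic touched in netcdf.py and test_netcdf.py above, a small worked sketch (my own illustration, with made-up sample values) of how a float range [cmin, cmax] is packed into an n-bit integer, reserving one integer value for the fill when the data are masked:

import numpy as np

data = np.linspace(250.0, 310.0, 11)    # made-up sample field, in kelvin
cmin, cmax = float(data.min()), float(data.max())
dtype = np.dtype("u2")                  # pack into unsigned 16-bit integers
n = dtype.itemsize * 8
masked = False

# scale_factor and add_offset as in _create_cf_data_variable above; the
# masked case keeps one integer value back for the fill value.
scale_factor = (cmax - cmin) / (2**n - 2 if masked else 2**n - 1)
add_offset = cmin                       # unsigned ("u" kind) case

# The round/astype round trip below is illustrative, not taken from the patch.
packed = np.round((data - add_offset) / scale_factor).astype(dtype)
unpacked = packed * scale_factor + add_offset

# Round-tripping loses at most one quantisation step.
assert np.max(np.abs(unpacked - data)) <= scale_factor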