13 changes: 9 additions & 4 deletions .travis.yml
@@ -105,7 +105,7 @@ install:
# Conda-forge versioning is out of order (0.9.* is later than 2.12.*).
- >
if [[ "${TEST_MINIMAL}" != true ]]; then
conda install --quiet -n ${ENV_NAME} python-eccodes=0.9.3;
conda install --quiet -n ${ENV_NAME} python-eccodes">=0.9.1, <2";
conda install --quiet -n ${ENV_NAME} --no-deps iris-grib;
fi
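
An aside on the pin: ">=0.9.1, <2" accepts the newer 0.9.* builds while excluding the legacy 2.12.* ones. A minimal sketch of why, using the "packaging" library's PEP 440 semantics as a stand-in for conda's matcher (version numbers illustrative):

# Sketch only: conda's matcher is not PEP 440, but behaves the same for
# this particular constraint.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet(">=0.9.1, <2")
for candidate in ("2.12.5", "0.9.1", "0.9.3"):
    print(candidate, Version(candidate) in spec)
# 2.12.5 False   <- legacy bindings excluded despite the higher number
# 0.9.1 True
# 0.9.3 True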

@@ -162,13 +162,18 @@ script:
fi

# Split the organisation out of the slug. See https://stackoverflow.com/a/5257398/741316 for description.
- export ORG=(${TRAVIS_REPO_SLUG//\// })
# NOTE: a *separate* "export" command appears to be necessary here: a command
# of the form "export ORG=.." failed to define ORG for the following command (?!)
- >
ORG=$(echo ${TRAVIS_REPO_SLUG} | cut -d/ -f1);
export ORG

- echo "Travis job context ORG=${ORG}; TRAVIS_EVENT_TYPE=${TRAVIS_EVENT_TYPE}; PUSH_BUILT_DOCS=${PUSH_BUILT_DOCS}"

# When we merge a change to SciTools/iris, we can push docs to github pages.
# At present, only the Python 3.7 "doctest" job does this.
# Results appear at https://scitools-docs.github.io/iris/<<branchname>>/index.html
- >
if [[ "${ORG}" == 'SciTools' && "${TRAVIS_EVENT_TYPE}" == 'push' && "${PUSH_BUILT_DOCS}" == 'true' ]]; then
- if [[ "${ORG}" == 'SciTools' && "${TRAVIS_EVENT_TYPE}" == 'push' && "${PUSH_BUILT_DOCS}" == 'true' ]]; then
cd ${INSTALL_DIR};
pip install doctr;
doctr deploy --deploy-repo SciTools-docs/iris --built-docs docs/iris/build/html
@@ -0,0 +1,6 @@
* Added support for the `black <https://black.readthedocs.io/en/stable/>`_ code formatter.
This is now automatically checked on GitHub PRs, replacing the older, unittest-based
"iris.tests.test_coding_standards.TestCodeFormat".
Black also integrates with most IDEs to apply the format corrections automatically.
See the new developer guide section on this:
https://scitools-docs.github.io/iris/master/developers_guide/code_format.html.
42 changes: 21 additions & 21 deletions lib/iris/experimental/regrid.py
@@ -473,6 +473,24 @@ def _regrid_area_weighted_array(
grid.

"""
# Determine which grid bounds are within src extent.
y_within_bounds = _within_bounds(
src_y_bounds, grid_y_bounds, grid_y_decreasing
)
x_within_bounds = _within_bounds(
src_x_bounds, grid_x_bounds, grid_x_decreasing
)

# Cache which src_bounds are within grid bounds
cached_x_bounds = []
cached_x_indices = []
for (x_0, x_1) in grid_x_bounds:
if grid_x_decreasing:
x_0, x_1 = x_1, x_0
x_bounds, x_indices = _cropped_bounds(src_x_bounds, x_0, x_1)
cached_x_bounds.append(x_bounds)
cached_x_indices.append(x_indices)

# Create empty data array to match the new grid.
# Note that dtype is not preserved and that the array is
# masked to allow for regions that do not overlap.
@@ -497,24 +515,6 @@ def _regrid_area_weighted_array(
# Assign to mask to explode it, allowing indexed assignment.
new_data.mask = False

# Determine which grid bounds are within src extent.
y_within_bounds = _within_bounds(
src_y_bounds, grid_y_bounds, grid_y_decreasing
)
x_within_bounds = _within_bounds(
src_x_bounds, grid_x_bounds, grid_x_decreasing
)

# Cache which src_bounds are within grid bounds
cached_x_bounds = []
cached_x_indices = []
for (x_0, x_1) in grid_x_bounds:
if grid_x_decreasing:
x_0, x_1 = x_1, x_0
x_bounds, x_indices = _cropped_bounds(src_x_bounds, x_0, x_1)
cached_x_bounds.append(x_bounds)
cached_x_indices.append(x_indices)

# Axes of data over which the weighted mean is calculated.
axes = []
if y_dim is not None:
@@ -565,15 +565,15 @@ def _regrid_area_weighted_array(
raise RuntimeError(
"Cannot handle split bounds " "in both x and y."
)
# Calculate weights based on areas of cropped bounds.
weights = area_func(y_bounds, x_bounds)

if x_dim is not None:
indices[x_dim] = x_indices
if y_dim is not None:
indices[y_dim] = y_indices
data = src_data[tuple(indices)]

# Calculate weights based on areas of cropped bounds.
weights = area_func(y_bounds, x_bounds)

# Transpose weights to match dim ordering in data.
weights_shape_y = weights.shape[0]
weights_shape_x = weights.shape[1]
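
For context, the moved block is the per-column bounds cache: `_cropped_bounds` is evaluated once per grid column, and the results are then reused for every row of the main regridding loop. A minimal standalone sketch of that caching pattern, with a hypothetical `crop` standing in for `_cropped_bounds`:

def crop(src_bounds, lo, hi):
    # Stand-in for _cropped_bounds: keep the source cells overlapping
    # [lo, hi], clipped to that interval.
    return [(max(b0, lo), min(b1, hi)) for (b0, b1) in src_bounds
            if b1 > lo and b0 < hi]

src_x_bounds = [(0, 1), (1, 2), (2, 3), (3, 4)]
grid_x_bounds = [(0, 2), (2, 4)]

# One crop per grid column, computed up front ...
cached_x_bounds = [crop(src_x_bounds, lo, hi) for (lo, hi) in grid_x_bounds]

# ... then reused on every pass of the (row, column) regridding loop.
for row in range(3):
    for j in range(len(grid_x_bounds)):
        x_bounds = cached_x_bounds[j]  # no recomputation per cell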
76 changes: 69 additions & 7 deletions lib/iris/fileformats/_pyke_rules/fc_rules_cf.krb
@@ -498,6 +498,22 @@ fc_build_cell_measure
python engine.rule_triggered.add(rule.name)


#
# Context:
# This rule will trigger for each ancillary_variable case specific fact.
#
# Purpose:
# Add the ancillary variable to the cube.
#
fc_build_ancil_var
foreach
facts_cf.ancillary_variable($var)
assert
python ancil_var = engine.cf_var.cf_group.ancillary_variables[$var]
python build_ancil_var(engine, ancil_var)
python engine.rule_triggered.add(rule.name)
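
Taken together with build_ancil_var further down, the intended effect is that a CF data variable carrying an `ancillary_variables` attribute now yields a cube with that variable attached. A hedged end-to-end sketch (filename and variable name hypothetical; the accessor shown is an assumption about the public cube API):

import iris

# e.g. a file whose data variable declares: ancillary_variables = "quality_flag"
cube = iris.load_cube("data_with_ancils.nc")
(av,) = cube.ancillary_variables()  # -> [iris.coords.AncillaryVariable]
print(av.name(), av.shape)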


#
# Context:
# This rule will trigger iff a CF latitude coordinate exists and
@@ -1941,36 +1957,37 @@ fc_extras
# Add it to the cube
cube.add_aux_coord(coord, data_dims)

# Update the coordinate to CF-netCDF variable mapping.
# Make a list with names, stored on the engine, so we can find them all later.
engine.provides['coordinates'].append((coord, cf_coord_var.cf_name))


################################################################################
def build_cell_measures(engine, cf_cm_attr, coord_name=None):
def build_cell_measures(engine, cf_cm_var):
"""Create a CellMeasure instance and add it to the cube."""
cf_var = engine.cf_var
cube = engine.cube
attributes = {}

# Get units
attr_units = get_attr_units(cf_cm_attr, attributes)
attr_units = get_attr_units(cf_cm_var, attributes)

data = _get_cf_var_data(cf_cm_attr, engine.filename)
# Get (lazy) content array
data = _get_cf_var_data(cf_cm_var, engine.filename)
Contributor

For consistency, should this have a comment similar to this line?

# Get (lazy) content array
data = _get_cf_var_data(cf_av_var, engine.filename)

Member Author

Ok, but "consistency" is a bit of a lost cause in this code IMHO. Not least because we have far too much code duplication code, as here !
( N.B. We do still hope to replace all of this with Python code #3415 )


# Determine the name of the dimension/s shared between the CF-netCDF data variable
# and the coordinate being built.
common_dims = [dim for dim in cf_cm_attr.dimensions
common_dims = [dim for dim in cf_cm_var.dimensions
if dim in cf_var.dimensions]
data_dims = None
if common_dims:
# Calculate the offset of each common dimension.
data_dims = [cf_var.dimensions.index(dim) for dim in common_dims]

# Determine the standard_name, long_name and var_name
standard_name, long_name, var_name = get_names(cf_cm_attr, coord_name, attributes)
standard_name, long_name, var_name = get_names(cf_cm_var, None, attributes)

# Obtain the cf_measure.
measure = cf_cm_attr.cf_measure
measure = cf_cm_var.cf_measure

# Create the CellMeasure
cell_measure = iris.coords.CellMeasure(data,
@@ -1984,6 +2001,51 @@ fc_extras
# Add it to the cube
cube.add_cell_measure(cell_measure, data_dims)

# Make a list with names, stored on the engine, so we can find them all later.
engine.provides['cell_measures'].append((cell_measure, cf_cm_var.cf_name))
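
For reference, a minimal sketch of the object this function builds, using public iris classes only (shape, values and names hypothetical):

import numpy as np
import iris.coords

cell_measure = iris.coords.CellMeasure(
    np.ones((3, 4)),        # lazy in real loads; eager here for brevity
    standard_name="cell_area",
    units="m2",
    measure="area",
)
# cube.add_cell_measure(cell_measure, data_dims=(0, 1)) attaches it along
# the dims computed from the dimensions shared with the data variable.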



################################################################################
def build_ancil_var(engine, cf_av_var):
"""Create an AncillaryVariable instance and add it to the cube."""
cf_var = engine.cf_var
cube = engine.cube
attributes = {}

# Get units
attr_units = get_attr_units(cf_av_var, attributes)

# Get (lazy) content array
data = _get_cf_var_data(cf_av_var, engine.filename)

# Determine the name of the dimension/s shared between the CF-netCDF data variable
# and the AV being built.
common_dims = [dim for dim in cf_av_var.dimensions
if dim in cf_var.dimensions]
data_dims = None
if common_dims:
# Calculate the offset of each common dimension.
data_dims = [cf_var.dimensions.index(dim) for dim in common_dims]

# Determine the standard_name, long_name and var_name
standard_name, long_name, var_name = get_names(cf_av_var, None, attributes)

# Create the AncillaryVariable
av = iris.coords.AncillaryVariable(
data,
standard_name=standard_name,
long_name=long_name,
var_name=var_name,
units=attr_units,
attributes=attributes)

# Add it to the cube
cube.add_ancillary_variable(av, data_dims)

# Make a list with names, stored on the engine, so we can find them all later.
engine.provides['ancillary_variables'].append((av, cf_av_var.cf_name))



################################################################################
44 changes: 30 additions & 14 deletions lib/iris/fileformats/netcdf.py
@@ -459,7 +459,10 @@ def __setstate__(self, state):

def _assert_case_specific_facts(engine, cf, cf_group):
# Initialise pyke engine "provides" hooks.
# These are used to patch non-processed element attributes after rules activation.
engine.provides["coordinates"] = []
engine.provides["cell_measures"] = []
engine.provides["ancillary_variables"] = []

# Assert facts for CF coordinates.
for cf_name in cf_group.coordinates.keys():
@@ -479,6 +482,12 @@ def _assert_case_specific_facts(engine, cf, cf_group):
_PYKE_FACT_BASE, "cell_measure", (cf_name,)
)

# Assert facts for CF ancillary variables.
for cf_name in cf_group.ancillary_variables.keys():
engine.add_case_specific_fact(
_PYKE_FACT_BASE, "ancillary_variable", (cf_name,)
)

# Assert facts for CF grid_mappings.
for cf_name in cf_group.grid_mappings.keys():
engine.add_case_specific_fact(
@@ -597,31 +606,38 @@ def _load_cube(engine, cf, cf_var, filename):
# Run pyke inference engine with forward chaining rules.
engine.activate(_PYKE_RULE_BASE)

# Populate coordinate attributes with the untouched attributes from the
# associated CF-netCDF variable.
coordinates = engine.provides.get("coordinates", [])

# Having run the rules, now populate the attributes of all the cf elements with the
# "unused" attributes from the associated CF-netCDF variable.
# That is, all those that aren't CF reserved terms.
def attribute_predicate(item):
return item[0] not in _CF_ATTRS

for coord, cf_var_name in coordinates:
tmpvar = filter(
attribute_predicate, cf.cf_group[cf_var_name].cf_attrs_unused()
)
def add_unused_attributes(iris_object, cf_var):
tmpvar = filter(attribute_predicate, cf_var.cf_attrs_unused())
for attr_name, attr_value in tmpvar:
_set_attributes(coord.attributes, attr_name, attr_value)
_set_attributes(iris_object.attributes, attr_name, attr_value)

def fix_attributes_all_elements(role_name):
elements_and_names = engine.provides.get(role_name, [])

for iris_object, cf_var_name in elements_and_names:
add_unused_attributes(iris_object, cf.cf_group[cf_var_name])

# Populate the attributes of all coordinates, cell-measures and ancillary-vars.
fix_attributes_all_elements("coordinates")
fix_attributes_all_elements("ancillary_variables")
fix_attributes_all_elements("cell_measures")

tmpvar = filter(attribute_predicate, cf_var.cf_attrs_unused())
# Attach untouched attributes of the associated CF-netCDF data variable to
# the cube.
for attr_name, attr_value in tmpvar:
_set_attributes(cube.attributes, attr_name, attr_value)
# Also populate attributes of the top-level cube itself.
add_unused_attributes(cube, cf_var)

# Work out reference names for all the coords.
names = {
coord.var_name: coord.standard_name or coord.var_name or "unknown"
for coord in cube.coords()
}

Contributor

Should there still be whitespace here?

Member Author

Ok, I suppose so.
I might have hoped that Black would take care of this, but clearly the results are not totally standardised.

# Add all the cube cell methods.
cube.cell_methods = [
iris.coords.CellMethod(
method=method.method,
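
The netcdf.py change above replaces three near-identical attribute-copying loops with one helper applied per element role, consuming the (object, cf_name) pairs that the rules append to engine.provides. A standalone sketch of the hand-off (illustrative _CF_ATTRS subset; the real code routes through _set_attributes to handle name clashes):

_CF_ATTRS = {"standard_name", "units", "bounds"}  # illustrative subset

def add_unused_attributes(iris_object, cf_attrs_unused):
    # Copy every attribute that is not a CF-reserved term onto the object.
    for name, value in cf_attrs_unused:
        if name not in _CF_ATTRS:
            iris_object.attributes[name] = value

class Element:  # stand-in for a coord, cell measure, ancillary variable or cube
    def __init__(self):
        self.attributes = {}

elem = Element()
add_unused_attributes(elem, [("units", "K"), ("source", "model run 42")])
assert elem.attributes == {"source": "model run 42"}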
6 changes: 0 additions & 6 deletions lib/iris/tests/__init__.py
Original file line number Diff line number Diff line change
@@ -1297,12 +1297,6 @@ class MyPlotTests(test.GraphicsTest):
)


# TODO: remove these skips when iris-grib is fixed
skip_grib_fail = unittest.skipIf(
True, "Test(s) are failing due to known problems " 'with "iris-grib".'
)


skip_sample_data = unittest.skipIf(
not SAMPLE_DATA_AVAILABLE,
('Test(s) require "iris-sample-data", ' "which is not available."),
11 changes: 7 additions & 4 deletions lib/iris/tests/integration/test_grib2.py
@@ -24,6 +24,7 @@
if tests.GRIB_AVAILABLE:
from iris_grib import load_pairs_from_fields
from iris_grib.message import GribMessage
from iris_grib.grib_phenom_translation import GRIBCode


@tests.skip_data
@@ -36,7 +37,6 @@ def test_gdt1(self):
cube = load_cube(path)
self.assertCMLApproxData(cube)

@tests.skip_grib_fail
def test_gdt90_with_bitmap(self):
path = tests.get_data_path(("GRIB", "umukv", "ukv_chan9.grib2"))
cube = load_cube(path)
@@ -156,6 +156,7 @@ def test_save_load(self):
cube.add_aux_coord(tcoord)
cube.add_aux_coord(fpcoord)
cube.attributes["WMO_constituent_type"] = 0
cube.attributes["GRIB_PARAM"] = GRIBCode("GRIB2:d000c014n000")

with self.temp_filename("test_grib_pdt40.grib2") as temp_file_path:
save(cube, temp_file_path)
@@ -232,9 +233,12 @@ def test_save_load(self):
self.assertEqual(test_cube.shape, (744, 744))
self.assertEqual(test_cube.cell_methods, ())

# Check no cube attributes on the re-loaded cube.
# Check only the GRIB_PARAM attribute exists on the re-loaded cube.
# Note: this does *not* match the original, but is as expected.
self.assertEqual(cube_loaded_from_saved.attributes, {})
self.assertEqual(
cube_loaded_from_saved.attributes,
{"GRIB_PARAM": GRIBCode("GRIB2:d000c003n001")},
)
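
A hedged aside on GRIB_PARAM: iris-grib now records the originating parameter code on the cube as a GRIBCode. The string form below is taken from this test; the positional integer form and the field meanings are assumptions about the GRIBCode API:

from iris_grib.grib_phenom_translation import GRIBCode

code = GRIBCode("GRIB2:d000c003n001")
# Assumed equivalent: GRIBCode(2, 0, 3, 1), i.e. edition 2, discipline 0,
# parameter category 3, parameter number 1.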

# Now remaining to check: coordinates + data...

@@ -300,7 +304,6 @@ def test_regular(self):
cube = load_cube(path)
self.assertCMLApproxData(cube)

@tests.skip_grib_fail
def test_reduced(self):
path = tests.get_data_path(("GRIB", "reduced", "reduced_gg.grib2"))
cube = load_cube(path)
1 change: 0 additions & 1 deletion lib/iris/tests/integration/test_grib_load.py
@@ -136,7 +136,6 @@ def test_reduced_ll(self):
)
self.assertCML(cube, ("grib_load", "reduced_ll_grib1.cml"))

@tests.skip_grib_fail
def test_reduced_gg(self):
cube = iris.load_cube(
tests.get_data_path(("GRIB", "reduced", "reduced_gg.grib2"))