Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions INSTALL
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,10 @@ grib-api 1.9.16 or later (http://www.ecmwf.int/products/data/software/download/g
matplotlib 1.2.0 (http://matplotlib.sourceforge.net/)
Python package for 2D plotting.

mock 1.0.1 (http://pypi.python.org/pypi/mock/)
Python mocking and patching package for testing. Note that this package
is only required to support the Iris unit tests.

nose 1.1.2 or later (http://nose.readthedocs.org/en/latest/)
Python package for software testing. Iris is not compatible with nose2.

Expand Down
107 changes: 71 additions & 36 deletions lib/iris/fileformats/ff.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,13 +24,13 @@

import numpy as np

import iris.config
import iris.fileformats.manager
from iris.exceptions import NotYetImplementedError
from iris.fileformats.manager import DataManager
import pp


FF_HEADER_DEPTH = 256 # in words (64-bit)
FF_WORD_DEPTH = 8 # in bytes
FF_HEADER_DEPTH = 256 # In words (64-bit).
FF_WORD_DEPTH = 8 # In bytes.

# UM marker to signify empty lookup table entry.
_FF_LOOKUP_TABLE_TERMINATE = -99
Expand Down Expand Up @@ -95,6 +95,10 @@
'lookup_table',
'data', ]

# Map PP LBUSER(1) data-type codes to the big-endian 64-bit numpy dtype of an
# unpacked FieldsFile payload (1: real, 2: integer, 3: logical); unknown codes
# fall back to the 'default' entry.
_LBUSER_DTYPE_LOOKUP = {1: np.dtype('>f8'),
                        2: np.dtype('>i8'),
                        3: np.dtype('>i8'),
                        'default': np.dtype('>f8'), }

class FFHeader(object):
"""A class to represent the FIXED_LENGTH_HEADER section of a FieldsFile."""
Expand Down Expand Up @@ -141,7 +145,8 @@ def __repr__(self):

def valid(self, name):
"""
Determine whether the FieldsFile FIXED_LENGTH_HEADER pointer attribute has a valid FieldsFile address.
Determine whether the FieldsFile FIXED_LENGTH_HEADER pointer attribute
has a valid FieldsFile address.

Args:

Expand All @@ -154,9 +159,10 @@ def valid(self, name):
"""

if name in _FF_HEADER_POINTERS:
value = getattr(self, name)[0] != _FF_HEADER_POINTER_NULL
value = getattr(self, name)[0] > _FF_HEADER_POINTER_NULL
else:
raise AttributeError("'%s' object does not have pointer attribute '%s'" % (self.__class__.__name__, name))
msg = '{!r} object does not have pointer attribute {!r}'
raise AttributeError(msg.format(self.__class__.__name__, name))
return value

def address(self, name):
Expand All @@ -176,7 +182,8 @@ def address(self, name):
if name in _FF_HEADER_POINTERS:
value = getattr(self, name)[0] * FF_WORD_DEPTH
else:
raise AttributeError("'%s' object does not have pointer attribute '%s'" % (self.__class__.__name__, name))
msg = '{!r} object does not have pointer attribute {!r}'
raise AttributeError(msg.format(self.__class__.__name__, name))
return value

def shape(self, name):
Expand All @@ -196,7 +203,8 @@ def shape(self, name):
if name in _FF_HEADER_POINTERS:
value = getattr(self, name)[1:]
else:
raise AttributeError("'%s' object does not have pointer address '%s'" % (self.__class_.__name__, name))
msg = '{!r} object does not have pointer address {!r}'
raise AttributeError(msg.format(self.__class_.__name__, name))
return value


Expand Down Expand Up @@ -232,6 +240,33 @@ def __init__(self, filename, read_data=False):
self._ff_header = FFHeader(filename)
self._filename = filename
self._read_data = read_data

def _payload(self, field):
'''Calculate the payload data depth (in bytes) and type.'''

if field.lbpack.n1 == 0:
# Data payload is not packed.
data_depth = (field.lblrec - field.lbext) * FF_WORD_DEPTH
# Determine PP field 64-bit payload datatype.
lookup = _LBUSER_DTYPE_LOOKUP
data_type = lookup.get(field.lbuser[0], lookup['default'])
else:
# Data payload is packed.
if field.lbpack.n1 == 1:
# Data packed using WGDOS archive method.
data_depth = ((field.lbnrec * 2) - 1) * pp.PP_WORD_DEPTH
elif field.lbpack.n1 == 2:
# Data packed using CRAY 32-bit method.
data_depth = (field.lblrec - field.lbext) * pp.PP_WORD_DEPTH
else:
msg = 'PP fields with LBPACK of {} are not supported.'
raise NotYetImplementedError(msg.format(field.lbpack))

# Determine PP field payload datatype.
lookup = pp.LBUSER_DTYPE_LOOKUP
data_type = lookup.get(field.lbuser[0], lookup['default'])

return data_depth, data_type

def _extract_field(self):
# FF table pointer initialisation based on FF LOOKUP table configuration.
Expand All @@ -241,53 +276,53 @@ def _extract_field(self):
# Open the FF for processing.
ff_file = open(self._ff_header.ff_filename, 'rb')
ff_file_seek = ff_file.seek

# Check for an instantaneous dump.
if self._ff_header.dataset_type == 1:
table_count = self._ff_header.total_prognostic_fields

# Process each FF LOOKUP table entry.
while table_count:
table_count -= 1
# Move file pointer to the start of the current FF LOOKUP table entry.
ff_file_seek(table_offset, os.SEEK_SET)
# Read the current PP header entry from the FF LOOKUP table.
pp_header_integers = np.fromfile(ff_file, dtype='>i8', count=pp.NUM_LONG_HEADERS) # 64-bit words.
pp_header_floats = np.fromfile(ff_file, dtype='>f8', count=pp.NUM_FLOAT_HEADERS) # 64-bit words.
pp_header_data = tuple(pp_header_integers) + tuple(pp_header_floats)
header_integers = np.fromfile(ff_file, dtype='>i8',
count=pp.NUM_LONG_HEADERS)
header_floats = np.fromfile(ff_file, dtype='>f8',
count=pp.NUM_FLOAT_HEADERS)
# In 64-bit words.
header_data = tuple(header_integers) + tuple(header_floats)
# Check whether the current FF LOOKUP table entry is valid.
if pp_header_data[0] == _FF_LOOKUP_TABLE_TERMINATE:
if header_data[0] == _FF_LOOKUP_TABLE_TERMINATE:
# There are no more FF LOOKUP table entries to read.
break
# Calculate next FF LOOKUP table entry.
table_offset += table_entry_depth
# Construct a PPField object and populate using the pp_header_data
# Construct a PPField object and populate using the header_data
# read from the current FF LOOKUP table.
# (The PPField sub-class will depend on the header release number.)
pp_field = pp.make_pp_field(pp_header_data)
# Calculate file pointer address for the start of the associated PP header data.
data_offset = pp_field.lbegin * FF_WORD_DEPTH
# Determine PP field payload depth.
pp_data_extra_depth = pp_field.lbext
if pp_field.lbpack:
# Convert PP field LBNREC, representing a count in 64-bit words,
# into its associated count in bytes.
pp_data_depth = ((pp_field.lbnrec * 2) - 1) * pp.PP_WORD_DEPTH # in bytes
else:
pp_data_depth = (pp_field.lblrec - pp_data_extra_depth) * pp.PP_WORD_DEPTH # in bytes

# Determine PP field payload datatype.
pp_data_type = pp.LBUSER_DTYPE_LOOKUP.get(pp_field.lbuser[0], pp.LBUSER_DTYPE_LOOKUP['default'])

field = pp.make_pp_field(header_data)
# Calculate start address of the associated PP header data.
data_offset = field.lbegin * FF_WORD_DEPTH
# Determine PP field payload depth and type.
data_depth, data_type = self._payload(field)
# Determine PP field data shape.
pp_data_shape = (pp_field.lbrow, pp_field.lbnpt)
data_shape = (field.lbrow, field.lbnpt)
# Determine whether to read the associated PP field data.
if self._read_data:
# Move file pointer to the start of the current PP field data.
ff_file_seek(data_offset, os.SEEK_SET)
# Get the PP field data.
data = pp_field.read_data(ff_file, pp_data_depth, pp_data_shape, pp_data_type)
pp_field._data = data
pp_field._data_manager = None
data = field.read_data(ff_file, data_depth, data_shape, data_type)
field._data = data
field._data_manager = None
else:
pp_field._data = np.array(pp.PPDataProxy(self._ff_header.ff_filename, data_offset, pp_data_depth, pp_field.lbpack))
pp_field._data_manager = iris.fileformats.manager.DataManager(pp_data_shape, pp_data_type, pp_field.bmdi)
yield pp_field
proxy = pp.PPDataProxy(self._filename, data_offset,
data_depth, field.lbpack)
field._data = np.array(proxy)
field._data_manager = DataManager(data_shape, data_type, field.bmdi)
yield field
ff_file.close()
return

Expand Down
22 changes: 17 additions & 5 deletions lib/iris/fileformats/pp.py
Original file line number Diff line number Diff line change
Expand Up @@ -670,12 +670,14 @@ def load(self, data_shape, data_type, mdi, deferred_slice):

def _read_data(pp_file, lbpack, data_len, data_shape, data_type, mdi):
"""Read the data from the given file object given its precise location in the file."""
if lbpack == 0:
if lbpack.n1 == 0:
data = numpy.fromfile(pp_file, dtype=data_type, count=data_len / data_type.itemsize)
elif lbpack == 1:
elif lbpack.n1 == 1:
data = pp_file.read(data_len)
data = pp_packing.wgdos_unpack(data, data_shape[0], data_shape[1], mdi)
elif lbpack == 4:
elif lbpack.n1 == 2:
data = numpy.fromfile(pp_file, dtype=data_type, count=data_len / data_type.itemsize)
elif lbpack.n1 == 4:
data = numpy.fromfile(pp_file, dtype=data_type, count=data_len / data_type.itemsize)
data = pp_packing.rle_decode(data, data_shape[0], data_shape[1], mdi)
else:
Expand All @@ -691,13 +693,13 @@ def _read_data(pp_file, lbpack, data_len, data_shape, data_type, mdi):

# Mask the array?
if mdi in data:
data = numpy.ma.masked_values(data, mdi, copy=False)
data = numpy.ma.masked_values(data, mdi, copy=False)

return data


# The special headers of the PPField classes which get some improved functionality
_SPECIAL_HEADERS = ('lbtim', 'lbcode', 'lbproc', 'data', 'data_manager')
_SPECIAL_HEADERS = ('lbtim', 'lbcode', 'lbpack', 'lbproc', 'data', 'data_manager')

def _header_defn(release_number):
"""
Expand Down Expand Up @@ -809,6 +811,16 @@ def _lbcode_setter(self, new_value):

lbcode = property(lambda self: self._lbcode, _lbcode_setter)

# lbpack
def _lbpack_setter(self, new_value):
if not isinstance(new_value, SplittableInt):
# add the n1/n2/n3/n4/n5 values for lbpack
name_mapping = dict(n5=slice(4, None), n4=3, n3=2, n2=1, n1=0)
new_value = SplittableInt(new_value, name_mapping)
self._lbpack = new_value

lbpack = property(lambda self: self._lbpack, _lbpack_setter)

# lbproc
def _lbproc_setter(self, new_value):
if not isinstance(new_value, BitwiseInt):
Expand Down
105 changes: 105 additions & 0 deletions lib/iris/tests/test_ff.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,29 @@
import collections
import os.path

import mock
import numpy

import iris
import iris.fileformats.ff as ff
import iris.fileformats.pp as pp


# Lightweight stand-ins for a PP field: only the attributes read by
# FF2PP._payload (lbext, lblrec, lbnrec, lbpack.n1, lbuser[0]) are provided.
_MOCK_FIELD = collections.namedtuple('MockField',
                                     'lbext lblrec lbnrec lbpack lbuser')
_MOCK_LBPACK = collections.namedtuple('MockPack', 'n1')

# PP-field: LBPACK N1 values.
_UNPACKED = 0
_WGDOS = 1
_CRAY = 2
_GRIB = 3  # Not implemented.
_RLE = 4  # Not supported, deprecated FF format.

# PP-field: LBUSER(1) values.
_REAL = 1
_INTEGER = 2
_LOGICAL = 3  # Not implemented.


class TestFF_HEADER(tests.IrisTest):
Expand Down Expand Up @@ -179,5 +200,89 @@ def test_unit_pass_0(self):
self.assertCML(cube, ('FF', filename))


class TestFFPayload(tests.IrisTest):
    """Unit tests for FF2PP._payload depth/datatype calculation."""

    # Arbitrary name: FFHeader is patched out below, so no file is opened.
    filename = 'mockery'

    def _test_payload(self, mock_field, expected_depth, expected_type):
        """
        Assert that FF2PP._payload(mock_field) returns the expected
        (data_depth, data_type) pair, with FFHeader construction mocked out.

        """
        with mock.patch('iris.fileformats.ff.FFHeader') as mock_header:
            mock_header.return_value = None
            ff2pp = ff.FF2PP(self.filename)
            data_depth, data_type = ff2pp._payload(mock_field)
            self.assertEqual(data_depth, expected_depth)
            self.assertEqual(data_type, expected_type)

    # Unpacked (N1=0): depth = (lblrec - lbext) * 8 bytes; FF 64-bit dtypes.
    def test_payload_unpacked_real(self):
        mock_field = _MOCK_FIELD(lbext=0, lblrec=100, lbnrec=-1,
                                 lbpack=_MOCK_LBPACK(_UNPACKED),
                                 lbuser=[_REAL])
        self._test_payload(mock_field, 800, ff._LBUSER_DTYPE_LOOKUP[_REAL])

    def test_payload_unpacked_real_ext(self):
        mock_field = _MOCK_FIELD(lbext=50, lblrec=100, lbnrec=-1,
                                 lbpack=_MOCK_LBPACK(_UNPACKED),
                                 lbuser=[_REAL])
        self._test_payload(mock_field, 400, ff._LBUSER_DTYPE_LOOKUP[_REAL])

    def test_payload_unpacked_integer(self):
        mock_field = _MOCK_FIELD(lbext=0, lblrec=200, lbnrec=-1,
                                 lbpack=_MOCK_LBPACK(_UNPACKED),
                                 lbuser=[_INTEGER])
        self._test_payload(mock_field, 1600, ff._LBUSER_DTYPE_LOOKUP[_INTEGER])

    def test_payload_unpacked_integer_ext(self):
        mock_field = _MOCK_FIELD(lbext=100, lblrec=200, lbnrec=-1,
                                 lbpack=_MOCK_LBPACK(_UNPACKED),
                                 lbuser=[_INTEGER])
        self._test_payload(mock_field, 800, ff._LBUSER_DTYPE_LOOKUP[_INTEGER])

    # WGDOS (N1=1): depth = (lbnrec * 2 - 1) * 8 bytes; lbext is ignored.
    def test_payload_wgdos_real(self):
        mock_field = _MOCK_FIELD(lbext=0, lblrec=-1, lbnrec=100,
                                 lbpack=_MOCK_LBPACK(_WGDOS),
                                 lbuser=[_REAL])
        self._test_payload(mock_field, 796, pp.LBUSER_DTYPE_LOOKUP[_REAL])

    def test_payload_wgdos_real_ext(self):
        mock_field = _MOCK_FIELD(lbext=50, lblrec=-1, lbnrec=100,
                                 lbpack=_MOCK_LBPACK(_WGDOS),
                                 lbuser=[_REAL])
        self._test_payload(mock_field, 796, pp.LBUSER_DTYPE_LOOKUP[_REAL])

    def test_payload_wgdos_integer(self):
        mock_field = _MOCK_FIELD(lbext=0, lblrec=-1, lbnrec=200,
                                 lbpack=_MOCK_LBPACK(_WGDOS),
                                 lbuser=[_INTEGER])
        self._test_payload(mock_field, 1596, pp.LBUSER_DTYPE_LOOKUP[_INTEGER])

    def test_payload_wgdos_integer_ext(self):
        mock_field = _MOCK_FIELD(lbext=100, lblrec=-1, lbnrec=200,
                                 lbpack=_MOCK_LBPACK(_WGDOS),
                                 lbuser=[_INTEGER])
        self._test_payload(mock_field, 1596, pp.LBUSER_DTYPE_LOOKUP[_INTEGER])

    # CRAY 32-bit (N1=2): depth = (lblrec - lbext) * 4 bytes (PP word depth).
    def test_payload_cray_real(self):
        mock_field = _MOCK_FIELD(lbext=0, lblrec=100, lbnrec=-1,
                                 lbpack=_MOCK_LBPACK(_CRAY),
                                 lbuser=[_REAL])
        self._test_payload(mock_field, 400, pp.LBUSER_DTYPE_LOOKUP[_REAL])

    def test_payload_cray_real_ext(self):
        mock_field = _MOCK_FIELD(lbext=50, lblrec=100, lbnrec=-1,
                                 lbpack=_MOCK_LBPACK(_CRAY),
                                 lbuser=[_REAL])
        self._test_payload(mock_field, 200, pp.LBUSER_DTYPE_LOOKUP[_REAL])

    def test_payload_cray_integer(self):
        mock_field = _MOCK_FIELD(lbext=0, lblrec=200, lbnrec=-1,
                                 lbpack=_MOCK_LBPACK(_CRAY),
                                 lbuser=[_INTEGER])
        self._test_payload(mock_field, 800, pp.LBUSER_DTYPE_LOOKUP[_INTEGER])

    def test_payload_cray_integer_ext(self):
        mock_field = _MOCK_FIELD(lbext=100, lblrec=200, lbnrec=-1,
                                 lbpack=_MOCK_LBPACK(_CRAY),
                                 lbuser=[_INTEGER])
        self._test_payload(mock_field, 400, pp.LBUSER_DTYPE_LOOKUP[_INTEGER])


if __name__ == '__main__':
tests.main()