From 469fbd9e560ab8de4da9103e5ca746c4aee9c89b Mon Sep 17 00:00:00 2001 From: Ben Cipollini Date: Fri, 5 Feb 2016 11:03:58 -0800 Subject: [PATCH 01/11] STY: autopep8 on source code. --- nibabel/affines.py | 4 +- nibabel/analyze.py | 24 ++++++------ nibabel/batteryrunners.py | 1 + nibabel/casting.py | 4 +- nibabel/checkwarns.py | 2 + nibabel/data.py | 11 ++++-- nibabel/deprecated.py | 1 + nibabel/dft.py | 11 ++++-- nibabel/ecat.py | 23 ++++++------ nibabel/eulerangles.py | 24 ++++++------ nibabel/fileholders.py | 1 + nibabel/fileslice.py | 6 +-- nibabel/freesurfer/mghformat.py | 14 +++---- nibabel/gifti/gifti.py | 2 + nibabel/gifti/parse_gifti_fast.py | 9 ++--- nibabel/imageclasses.py | 2 + nibabel/imageglobals.py | 2 + nibabel/info.py | 36 +++++++++--------- nibabel/keywordonly.py | 2 +- nibabel/minc1.py | 12 ++++-- nibabel/minc2.py | 3 ++ nibabel/nicom/csareader.py | 4 +- nibabel/nicom/dwiparams.py | 10 ++--- nibabel/nicom/structreader.py | 1 + nibabel/nifti1.py | 36 +++++++++--------- nibabel/nifti2.py | 2 +- nibabel/onetime.py | 1 + nibabel/openers.py | 2 +- nibabel/orientations.py | 2 +- nibabel/parrec.py | 22 ++++++----- nibabel/quaternions.py | 62 +++++++++++++++---------------- nibabel/spatialimages.py | 3 +- nibabel/spm2analyze.py | 2 +- nibabel/spm99analyze.py | 12 +++--- nibabel/tmpdirs.py | 3 ++ nibabel/trackvis.py | 7 ++-- nibabel/tripwire.py | 1 + nibabel/volumeutils.py | 12 +++--- setup.py | 7 ++-- 39 files changed, 212 insertions(+), 171 deletions(-) diff --git a/nibabel/affines.py b/nibabel/affines.py index adc093f053..de89612add 100644 --- a/nibabel/affines.py +++ b/nibabel/affines.py @@ -214,7 +214,7 @@ def append_diag(aff, steps, starts=()): starts = np.zeros(n_steps, dtype=steps.dtype) elif len(starts) != n_steps: raise ValueError('Steps should have same length as starts') - old_n_out, old_n_in = aff.shape[0]-1, aff.shape[1]-1 + old_n_out, old_n_in = aff.shape[0] - 1, aff.shape[1] - 1 # make new affine aff_plus = np.zeros((old_n_out + n_steps + 1, old_n_in + n_steps + 1), dtype=aff.dtype) @@ -223,7 +223,7 @@ def append_diag(aff, steps, starts=()): aff_plus[:old_n_out, -1] = aff[:old_n_out, -1] # Add new diagonal elements for i, el in enumerate(steps): - aff_plus[old_n_out+i, old_n_in+i] = el + aff_plus[old_n_out + i, old_n_in + i] = el # Add translations for new affine, plus last 1 aff_plus[old_n_out:, -1] = list(starts) + [1] return aff_plus diff --git a/nibabel/analyze.py b/nibabel/analyze.py index ee0d127a24..2f8cf03988 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -107,7 +107,7 @@ ('session_error', 'i2'), ('regular', 'S1'), ('hkey_un0', 'S1') - ] +] image_dimension_dtd = [ ('dim', 'i2', (8,)), ('vox_units', 'S4'), @@ -127,7 +127,7 @@ ('verified', 'i4'), ('glmax', 'i4'), ('glmin', 'i4') - ] +] data_history_dtd = [ ('descrip', 'S80'), ('aux_file', 'S24'), @@ -147,7 +147,7 @@ ('omin', 'i4'), ('smax', 'i4'), ('smin', 'i4') - ] +] # Full header numpy dtype combined across sub-fields header_dtype = np.dtype(header_key_dtd + image_dimension_dtd + @@ -606,7 +606,7 @@ def get_data_shape(self): ndims = dims[0] if ndims == 0: return 0, - return tuple(int(d) for d in dims[1:ndims+1]) + return tuple(int(d) for d in dims[1:ndims + 1]) def set_data_shape(self, shape): ''' Set shape of data @@ -624,18 +624,18 @@ def set_data_shape(self, shape): dims[:] = 1 dims[0] = ndims try: - dims[1:ndims+1] = shape + dims[1:ndims + 1] = shape except (ValueError, OverflowError): # numpy 1.4.1 at least generates a ValueError from trying to set a # python long into an int64 array 
(dims are int64 for nifti2) values_fit = False else: - values_fit = np.all(dims[1:ndims+1] == shape) + values_fit = np.all(dims[1:ndims + 1] == shape) # Error if we did not succeed setting dimensions if not values_fit: raise HeaderDataError('shape %s does not fit in dim datatype' % (shape,)) - self._structarr['pixdim'][ndims+1:] = 1.0 + self._structarr['pixdim'][ndims + 1:] = 1.0 def get_base_affine(self): ''' Get affine from basic (shared) header fields @@ -659,8 +659,8 @@ def get_base_affine(self): hdr = self._structarr dims = hdr['dim'] ndim = dims[0] - return shape_zoom_affine(hdr['dim'][1:ndim+1], - hdr['pixdim'][1:ndim+1], + return shape_zoom_affine(hdr['dim'][1:ndim + 1], + hdr['pixdim'][1:ndim + 1], self.default_x_flip) get_best_affine = get_base_affine @@ -691,7 +691,7 @@ def get_zooms(self): if ndim == 0: return (1.0,) pixdims = hdr['pixdim'] - return tuple(pixdims[1:ndim+1]) + return tuple(pixdims[1:ndim + 1]) def set_zooms(self, zooms): ''' Set zooms into header fields @@ -708,7 +708,7 @@ def set_zooms(self, zooms): if np.any(zooms < 0): raise HeaderDataError('zooms must be positive') pixdims = hdr['pixdim'] - pixdims[1:ndim+1] = zooms[:] + pixdims[1:ndim + 1] = zooms[:] def as_analyze_map(self): """ Return header as mapping for conversion to Analyze types @@ -794,7 +794,7 @@ def set_slope_inter(self, slope, inter=None): If float, value must be 0.0 or we raise a ``HeaderTypeError`` ''' if ((slope in (None, 1) or np.isnan(slope)) and - (inter in (None, 0) or np.isnan(inter))): + (inter in (None, 0) or np.isnan(inter))): return raise HeaderTypeError('Cannot set slope != 1 or intercept != 0 ' 'for Analyze headers') diff --git a/nibabel/batteryrunners.py b/nibabel/batteryrunners.py index f63373c013..be3977111a 100644 --- a/nibabel/batteryrunners.py +++ b/nibabel/batteryrunners.py @@ -175,6 +175,7 @@ def __len__(self): class Report(object): + def __init__(self, error=Exception, problem_level=0, diff --git a/nibabel/casting.py b/nibabel/casting.py index b7b3bb52ca..8707f94e9a 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -272,7 +272,7 @@ def type_info(np_type): if not np_type in (np.longdouble, np.longcomplex) or width not in (16, 32): raise FloatingError('We had not expected type %s' % np_type) if (vals == (1, 1, 16) and on_powerpc() and - _check_maxexp(np.longdouble, 1024)): + _check_maxexp(np.longdouble, 1024)): # double pair on PPC. 
The _check_nmant routine does not work for this # type, hence the powerpc platform check instead ret.update(dict(nmant=106, width=width)) @@ -664,7 +664,7 @@ def best_float(): except FloatingError: return np.float64 if (long_info['nmant'] > type_info(np.float64)['nmant'] and - machine() != 'sparc64'): # sparc has crazy-slow float128 + machine() != 'sparc64'): # sparc has crazy-slow float128 return np.longdouble return np.float64 diff --git a/nibabel/checkwarns.py b/nibabel/checkwarns.py index c06c462972..52c6c718e9 100644 --- a/nibabel/checkwarns.py +++ b/nibabel/checkwarns.py @@ -19,6 +19,7 @@ class ErrorWarnings(error_warnings): + def __init__(self, *args, **kwargs): warnings.warn('ErrorWarnings is deprecated and will be removed in ' 'nibabel v3.0; use nibabel.testing.error_warnings.', @@ -27,6 +28,7 @@ def __init__(self, *args, **kwargs): class IgnoreWarnings(suppress_warnings): + def __init__(self, *args, **kwargs): warnings.warn('IgnoreWarnings is deprecated and will be removed in ' 'nibabel v3.0; use nibabel.testing.suppress_warnings.', diff --git a/nibabel/data.py b/nibabel/data.py index 227b5eb8be..c56146ed31 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -33,6 +33,7 @@ class BomberError(DataError, AttributeError): class Datasource(object): ''' Simple class to add base path to relative path ''' + def __init__(self, base_path): ''' Initialize datasource @@ -87,7 +88,7 @@ def list_files(self, relative=True): out_list = list() for base, dirs, files in os.walk(self.base_path): if relative: - base = base[len(self.base_path)+1:] + base = base[len(self.base_path) + 1:] for filename in files: out_list.append(pjoin(base, filename)) return out_list @@ -97,6 +98,7 @@ class VersionedDatasource(Datasource): ''' Datasource with version information in config file ''' + def __init__(self, base_path, config_filename=None): ''' Initialize versioned datasource @@ -239,8 +241,8 @@ def find_data_dir(root_dirs, *names): if os.path.isdir(pth): return pth raise DataError('Could not find datasource "%s" in data path "%s"' % - (ds_relative, - os.path.pathsep.join(root_dirs))) + (ds_relative, + os.path.pathsep.join(root_dirs))) def make_datasource(pkg_def, **kwargs): @@ -304,6 +306,7 @@ def make_datasource(pkg_def, **kwargs): class Bomber(object): ''' Class to raise an informative error when used ''' + def __init__(self, name, msg): self.name = name self.msg = msg @@ -350,7 +353,7 @@ def datasource_or_bomber(pkg_def, **options): return Bomber(sys_relpath, str(e)) # check version if (version is None or - LooseVersion(ds.version) >= LooseVersion(version)): + LooseVersion(ds.version) >= LooseVersion(version)): return ds if 'name' in pkg_def: pkg_name = pkg_def['name'] diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index 2a3e66d49d..805903d0f6 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -24,6 +24,7 @@ class ModuleProxy(object): when you do attribute access and return the attributes of the imported module. 
""" + def __init__(self, module_name): self._module_name = module_name diff --git a/nibabel/dft.py b/nibabel/dft.py index 3410db6ec0..2200355920 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -57,10 +57,11 @@ def __init__(self, series, i, si): def __str__(self): fmt = 'expecting instance number %d, got %d' - return fmt % (self.i+1, self.si.instance_number) + return fmt % (self.i + 1, self.si.instance_number) class _Study(object): + def __init__(self, d): self.uid = d['uid'] self.date = d['date'] @@ -93,6 +94,7 @@ def patient_name_or_uid(self): class _Series(object): + def __init__(self, d): self.uid = d['uid'] self.study = d['study'] @@ -160,7 +162,7 @@ def as_nifti(self): for (i, si) in enumerate(self.storage_instances): if i + 1 != si.instance_number: raise InstanceStackError(self, i, si) - logger.info('reading %d/%d' % (i+1, len(self.storage_instances))) + logger.info('reading %d/%d' % (i + 1, len(self.storage_instances))) d = self.storage_instances[i].dicom() data[i, :, :] = d.pixel_array @@ -190,7 +192,7 @@ def as_nifti(self): m = ((pdi * cosi[0], pdj * cosj[0], pdk * cosk[0], pos_1[0]), (pdi * cosi[1], pdj * cosj[1], pdk * cosk[1], pos_1[1]), (pdi * cosi[2], pdj * cosj[2], pdk * cosk[2], pos_1[2]), - ( 0, 0, 0, 1)) + (0, 0, 0, 1)) m = numpy.array(m) @@ -212,6 +214,7 @@ def nifti_size(self): class _StorageInstance(object): + def __init__(self, d): self.uid = d['uid'] self.instance_number = d['instance_number'] @@ -238,6 +241,7 @@ def dicom(self): class _db_nochange: """context guard for read-only database access""" + def __enter__(self): self.c = DB.cursor() return self.c @@ -251,6 +255,7 @@ def __exit__(self, type, value, traceback): class _db_change: """context guard for database access requiring a commit""" + def __enter__(self): self.c = DB.cursor() return self.c diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 7b8efc4656..bd1d7b589f 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -117,7 +117,7 @@ ('data_units', '32S'), ('septa_state', np.uint16), ('fill', '12S') - ] +] hdr_dtype = np.dtype(main_header_dtd) @@ -384,7 +384,7 @@ def read_mlist(fileobj, endianness): mlist = [] return mlist # Use all but first housekeeping row - mlists.append(rows[1:n_rows+1]) + mlists.append(rows[1:n_rows + 1]) mlist_index += n_rows if mlist_block_no <= 2: # should block_no in (1, 2) be an error? break @@ -467,7 +467,7 @@ def get_series_framenumbers(mlist): try: for frame_stored, (true_order, _) in frames_order.items(): # frame as stored in file -> true number in series - frame_dict[frame_stored] = trueframenumbers[true_order]+1 + frame_dict[frame_stored] = trueframenumbers[true_order] + 1 return frame_dict except: raise IOError('Error in header or mlist order unknown') @@ -571,7 +571,7 @@ def get_frame_affine(self, frame=0): dims = self.get_shape(frame) # get translations from center of image - origin_offset = (np.array(dims)-1) / 2.0 + origin_offset = (np.array(dims) - 1) / 2.0 aff = np.diag(zooms) aff[:3, -1] = -origin_offset * zooms[:-1] + np.array([x_off, y_off, z_off]) @@ -664,6 +664,7 @@ class EcatImageArrayProxy(object): The array proxy allows us to freeze the passed fileobj and header such that it returns the expected data array. ''' + def __init__(self, subheader): self._subheader = subheader self._data = None @@ -706,7 +707,7 @@ def __getitem__(self, sliceobj): slice3 = sliceobj[ax_inds[3]] # We will load volume by volume. 
Make slicer into volume by dropping # index over the volume axis - in_slicer = sliceobj[:ax_inds[3]] + sliceobj[ax_inds[3]+1:] + in_slicer = sliceobj[:ax_inds[3]] + sliceobj[ax_inds[3] + 1:] # int index for 4th axis, load one slice if isinstance(slice3, Integral): data = self._subheader.data_from_fileobj(frame_mapping[slice3][0]) @@ -868,16 +869,16 @@ def from_file_map(klass, file_map): hdr_fid = hdr_file.get_prepare_fileobj(mode='rb') header = klass._header.from_fileobj(hdr_fid) hdr_copy = header.copy() - ### LOAD MLIST + # LOAD MLIST mlist = np.zeros((header['num_frames'], 4), dtype=np.int32) mlist_data = read_mlist(hdr_fid, hdr_copy.endianness) mlist[:len(mlist_data)] = mlist_data - ### LOAD SUBHEADERS + # LOAD SUBHEADERS subheaders = klass._subheader(hdr_copy, mlist, hdr_fid) - ### LOAD DATA - ## Class level ImageArrayProxy + # LOAD DATA + # Class level ImageArrayProxy data = klass.ImageArrayProxy(subheaders) - ## Get affine + # Get affine if not subheaders._check_affines(): warnings.warn('Affines different across frames, loading affine ' 'from FIRST frame', UserWarning) @@ -961,7 +962,7 @@ def to_file_map(self, file_map=None): image = self._subheader.raw_data_from_fileobj(index) # Write frame images - self._write_data(image, imgf, pos+2, endianness='>') + self._write_data(image, imgf, pos + 2, endianness='>') # Move to dictionnary offset and write dictionnary entry self._write_data(mlist[index], imgf, entry_pos, endianness='>') diff --git a/nibabel/eulerangles.py b/nibabel/eulerangles.py index 3f8fa36304..e633a6a22c 100644 --- a/nibabel/eulerangles.py +++ b/nibabel/eulerangles.py @@ -250,15 +250,15 @@ def mat2euler(M, cy_thresh=None): cy_thresh = _FLOAT_EPS_4 r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat # cy: sqrt((cos(y)*cos(z))**2 + (cos(x)*cos(y))**2) - cy = math.sqrt(r33*r33 + r23*r23) + cy = math.sqrt(r33 * r33 + r23 * r23) if cy > cy_thresh: # cos(y) not close to zero, standard form - z = math.atan2(-r12, r11) # atan2(cos(y)*sin(z), cos(y)*cos(z)) - y = math.atan2(r13, cy) # atan2(sin(y), cy) + z = math.atan2(-r12, r11) # atan2(cos(y)*sin(z), cos(y)*cos(z)) + y = math.atan2(r13, cy) # atan2(sin(y), cy) x = math.atan2(-r23, r33) # atan2(cos(y)*sin(x), cos(x)*cos(y)) else: # cos(y) (close to) zero, so x -> 0.0 (see above) # so r21 -> sin(z), r22 -> cos(z) and - z = math.atan2(r21, r22) - y = math.atan2(r13, cy) # atan2(sin(y), cy) + z = math.atan2(r21, r22) + y = math.atan2(r13, cy) # atan2(sin(y), cy) x = 0.0 return z, y, x @@ -295,19 +295,19 @@ def euler2quat(z=0, y=0, x=0): https://en.wikipedia.org/wiki/Quaternions#Hamilton_product - to formulae from 2.) to give formula for combined rotations. 
'''
- z = z/2.0
- y = y/2.0
- x = x/2.0
+ z = z / 2.0
+ y = y / 2.0
+ x = x / 2.0
 cz = math.cos(z)
 sz = math.sin(z)
 cy = math.cos(y)
 sy = math.sin(y)
 cx = math.cos(x)
 sx = math.sin(x)
- return np.array([cx*cy*cz - sx*sy*sz,
- cx*sy*sz + cy*cz*sx,
- cx*cz*sy - sx*cy*sz,
- cx*cy*sz + sx*cz*sy])
+ return np.array([cx * cy * cz - sx * sy * sz,
+ cx * sy * sz + cy * cz * sx,
+ cx * cz * sy - sx * cy * sz,
+ cx * cy * sz + sx * cz * sy])


 def quat2euler(q):
diff --git a/nibabel/fileholders.py b/nibabel/fileholders.py
index c096267a0b..5a858f1dbf 100644
--- a/nibabel/fileholders.py
+++ b/nibabel/fileholders.py
@@ -20,6 +20,7 @@ class FileHolderError(Exception):

 class FileHolder(object):
 ''' class to contain filename, fileobj and file position
 '''
+
 def __init__(self,
 filename=None,
 fileobj=None,
diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py
index feecc718d4..3ba907e019 100644
--- a/nibabel/fileslice.py
+++ b/nibabel/fileslice.py
@@ -88,7 +88,7 @@ def canonical_slicers(sliceobj, shape, check_inds=True):
 can_slicers.append(None)
 continue
 if slicer == Ellipsis:
- remaining = sliceobj[i+1:]
+ remaining = sliceobj[i + 1:]
 if Ellipsis in remaining:
 raise ValueError("More than one Ellipsis in slicing "
 "expression")
@@ -276,7 +276,7 @@ def _positive_slice(slicer):
 n = gap / step
 n = int(n) - 1 if int(n) == n else int(n)
 end = start + n * step
- return slice(end, start+1, -step)
+ return slice(end, start + 1, -step)


 def threshold_heuristic(slicer,
@@ -399,7 +399,7 @@ def optimize_slicer(slicer, dim_len, all_full, is_slowest, stride,
 if slicer == slice(0, dim_len, 1):
 return slice(None), slice(None)
 # full, but reversed
- if slicer == slice(dim_len-1, None, -1):
+ if slicer == slice(dim_len - 1, None, -1):
 return slice(None), slice(None, None, -1)
 # Not full, mabye continuous
 is_int = False
diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py
index ae2c7cd7f9..83c6af0ef8 100644
--- a/nibabel/freesurfer/mghformat.py
+++ b/nibabel/freesurfer/mghformat.py
@@ -15,7 +15,7 @@
 from ..volumeutils import (array_to_file, array_from_file, Recoder)
 from ..spatialimages import HeaderDataError, SpatialImage
-from ..fileholders import FileHolder, copy_file_map
+from ..fileholders import FileHolder, copy_file_map
 from ..arrayproxy import ArrayProxy
 from ..keywordonly import kw_only_meth
 from ..openers import ImageOpener
@@ -33,11 +33,11 @@
 ('delta', '>f4', (3,)),
 ('Mdc', '>f4', (3, 3)),
 ('Pxyz_c', '>f4', (3,))
- ]
+]

 # Optional footer.
Also has more stuff after this, optionally footer_dtd = [ ('mrparms', '>f4', (4,)) - ] +] header_dtype = np.dtype(header_dtd) footer_dtype = np.dtype(footer_dtd) @@ -47,13 +47,13 @@ # caveat 2: Note that the bytespervox you get is in str ( not an int) _dtdefs = ( # code, conversion function, dtype, bytes per voxel (0, 'uint8', '>u1', '1', 'MRI_UCHAR', np.uint8, np.dtype(np.uint8), - np.dtype(np.uint8).newbyteorder('>')), + np.dtype(np.uint8).newbyteorder('>')), (4, 'int16', '>i2', '2', 'MRI_SHORT', np.int16, np.dtype(np.int16), - np.dtype(np.int16).newbyteorder('>')), + np.dtype(np.int16).newbyteorder('>')), (1, 'int32', '>i4', '4', 'MRI_INT', np.int32, np.dtype(np.int32), - np.dtype(np.int32).newbyteorder('>')), + np.dtype(np.int32).newbyteorder('>')), (3, 'float', '>f4', '4', 'MRI_FLOAT', np.float32, np.dtype(np.float32), - np.dtype(np.float32).newbyteorder('>'))) + np.dtype(np.float32).newbyteorder('>'))) # make full code alias bank, including dtype column data_type_codes = Recoder(_dtdefs, fields=('code', 'label', 'dtype', diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 60b6aae455..ed48de8a6b 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -27,6 +27,7 @@ class GiftiMetaData(xml.XmlSerializable): """ A list of GiftiNVPairs in stored in the list self.data """ + def __init__(self, nvpair=None): self.data = [] if nvpair is not None: @@ -189,6 +190,7 @@ def print_summary(self): @np.deprecate_with_doc("This is an internal API that will be discontinued.") def data_tag(dataarray, encoding, datatype, ordering): class DataTag(xml.XmlSerializable): + def __init__(self, *args): self.args = args diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index 5a5b92856a..85973db40c 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -279,8 +279,7 @@ def CharacterDataHandler(self, data): self._char_blocks.append(data) def flush_chardata(self): - """ Collate and process collected character data - """ + """ Collate and process collected character data""" if self._char_blocks is None: return # Just join the strings to get the data. 
Maybe there are some memory @@ -327,18 +326,18 @@ def flush_chardata(self): @property def pending_data(self): - " True if there is character data pending for processing " + """True if there is character data pending for processing""" return self._char_blocks is not None class Outputter(GiftiImageParser): + @np.deprecate_with_doc("Use GiftiImageParser instead.") def __init__(self): super(Outputter, self).__init__() def initialize(self): - """ Initialize outputter - """ + """ Initialize outputter""" self.__init__() diff --git a/nibabel/imageclasses.py b/nibabel/imageclasses.py index 7c224e1af9..239b4fea56 100644 --- a/nibabel/imageclasses.py +++ b/nibabel/imageclasses.py @@ -34,6 +34,7 @@ # DEPRECATED: mapping of names to classes and class functionality class ClassMapDict(dict): + def __getitem__(self, *args, **kwargs): warnings.warn("class_map is deprecated.", DeprecationWarning, stacklevel=2) @@ -88,6 +89,7 @@ def __getitem__(self, *args, **kwargs): class ExtMapRecoder(Recoder): + def __getitem__(self, *args, **kwargs): warnings.warn("ext_map is deprecated.", DeprecationWarning, stacklevel=2) diff --git a/nibabel/imageglobals.py b/nibabel/imageglobals.py index 0fc6dd3033..91ebaf38ea 100644 --- a/nibabel/imageglobals.py +++ b/nibabel/imageglobals.py @@ -34,6 +34,7 @@ class ErrorLevel(object): """ Context manager to set log error level """ + def __init__(self, level): self.level = level @@ -50,6 +51,7 @@ def __exit__(self, exc, value, tb): class LoggingOutputSuppressor(object): """Context manager to prevent global logger from printing""" + def __enter__(self): self.orig_handlers = logger.handlers for handler in self.orig_handlers: diff --git a/nibabel/info.py b/nibabel/info.py index 52f8e62cc1..6a44560a44 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -108,23 +108,23 @@ PYDICOM_MIN_VERSION = '0.9.7' # Main setup parameters -NAME = 'nibabel' +NAME = 'nibabel' MAINTAINER = "Matthew Brett, Michael Hanke, Eric Larson, " \ "Chris Markiewicz" -MAINTAINER_EMAIL = "neuroimaging@python.org" -DESCRIPTION = description -LONG_DESCRIPTION = long_description -URL = "http://nipy.org/nibabel" -DOWNLOAD_URL = "https://github.com/nipy/nibabel" -LICENSE = "MIT license" -CLASSIFIERS = CLASSIFIERS -AUTHOR = "Matthew Brett, Michael Hanke, Stephan Gerhard" -AUTHOR_EMAIL = "neuroimaging@python.org" -PLATFORMS = "OS Independent" -MAJOR = _version_major -MINOR = _version_minor -MICRO = _version_micro -ISRELEASE = _version_extra == '' -VERSION = __version__ -PROVIDES = ["nibabel", 'nisext'] -REQUIRES = ["numpy (>=%s)" % NUMPY_MIN_VERSION] +MAINTAINER_EMAIL = "neuroimaging@python.org" +DESCRIPTION = description +LONG_DESCRIPTION = long_description +URL = "http://nipy.org/nibabel" +DOWNLOAD_URL = "https://github.com/nipy/nibabel" +LICENSE = "MIT license" +CLASSIFIERS = CLASSIFIERS +AUTHOR = "Matthew Brett, Michael Hanke, Stephan Gerhard" +AUTHOR_EMAIL = "neuroimaging@python.org" +PLATFORMS = "OS Independent" +MAJOR = _version_major +MINOR = _version_minor +MICRO = _version_micro +ISRELEASE = _version_extra == '' +VERSION = __version__ +PROVIDES = ["nibabel", 'nisext'] +REQUIRES = ["numpy (>=%s)" % NUMPY_MIN_VERSION] diff --git a/nibabel/keywordonly.py b/nibabel/keywordonly.py index 4cf0ed8bae..8cb4908c1e 100644 --- a/nibabel/keywordonly.py +++ b/nibabel/keywordonly.py @@ -25,4 +25,4 @@ def kw_only_meth(n): The method has at least one positional argument ``self`` or ``cls``; allow for that. 
""" - return kw_only_func(n+1) + return kw_only_func(n + 1) diff --git a/nibabel/minc1.py b/nibabel/minc1.py index f5fc0ac918..74cc68d40a 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -27,9 +27,10 @@ ('h', 'signed__'): np.int16, ('i', 'unsigned'): np.uint32, ('i', 'signed__'): np.int32, - } +} -# See https://en.wikibooks.org/wiki/MINC/Reference/MINC1-programmers-guide#MINC_specific_convenience_functions +# See +# https://en.wikibooks.org/wiki/MINC/Reference/MINC1-programmers-guide#MINC_specific_convenience_functions _default_dir_cos = { 'xspace': [1, 0, 0], 'yspace': [0, 1, 0], @@ -47,6 +48,7 @@ class Minc1File(object): this only when reading a MINC file, to pull out useful header information, and for the method of reading the data out ''' + def __init__(self, mincfile): self._mincfile = mincfile self._image = mincfile.variables['image'] @@ -112,7 +114,7 @@ def get_affine(self): steps[i] = dim.step starts[i] = dim.start origin = np.dot(rot_mat, starts) - aff = np.eye(nspatial+1) + aff = np.eye(nspatial + 1) aff[:nspatial, :nspatial] = rot_mat * steps aff[:nspatial, nspatial] = origin return aff @@ -209,7 +211,7 @@ def _normalize(self, data, sliceobj=()): i_slicer += broad_part imax = self._get_array(image_max)[i_slicer] imin = self._get_array(image_min)[i_slicer] - slope = (imax-imin) / (dmax-dmin) + slope = (imax - imin) / (dmax - dmin) inter = (imin - dmin * slope) out_data *= slope out_data += inter @@ -243,6 +245,7 @@ class MincImageArrayProxy(object): The array proxy allows us to freeze the passed fileobj and header such that it returns the expected data array. ''' + def __init__(self, minc_file): self.minc_file = minc_file self._shape = minc_file.get_data_shape() @@ -280,6 +283,7 @@ def data_from_fileobj(self, fileobj): class Minc1Header(MincHeader): + @classmethod def may_contain_header(klass, binaryblock): return binaryblock[:4] == b'CDF\x01' diff --git a/nibabel/minc2.py b/nibabel/minc2.py index 393fa02180..2782f15146 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -36,6 +36,7 @@ class Hdf5Bunch(object): """ Make object for accessing attributes of variable """ + def __init__(self, var): for name, value in var.attrs.items(): setattr(self, name, value) @@ -48,6 +49,7 @@ class Minc2File(Minc1File): this only when reading a MINC2 file, to pull out useful header information, and for the method of reading the data out ''' + def __init__(self, mincfile): self._mincfile = mincfile minc_part = mincfile['minc-2.0'] @@ -135,6 +137,7 @@ def get_scaled_data(self, sliceobj=()): class Minc2Header(MincHeader): + @classmethod def may_contain_header(klass, binaryblock): return binaryblock[:4] == b'\211HDF' diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index e3e20cea8e..c42e37fb2a 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -16,7 +16,7 @@ 'SL': int, # signed long 'UL': int, # unsigned long 'IS': int, # integer string - } +} MAX_CSA_ITEMS = 199 @@ -152,7 +152,7 @@ def read(csa_str): # go to 4 byte boundary plus4 = item_len % 4 if plus4 != 0: - up_str.ptr += (4-plus4) + up_str.ptr += (4 - plus4) tag['items'] = items csa_dict['tags'][name] = tag return csa_dict diff --git a/nibabel/nicom/dwiparams.py b/nibabel/nicom/dwiparams.py index c37f85701a..e9d05c0d57 100644 --- a/nibabel/nicom/dwiparams.py +++ b/nibabel/nicom/dwiparams.py @@ -106,19 +106,19 @@ def nearest_pos_semi_def(B): lam1a, lam2a, lam3a = vals scalers = np.zeros((3,)) if cardneg == 2: - b112 = np.max([0, lam1a+(lam2a+lam3a)/3.]) + b112 = np.max([0, lam1a + (lam2a + 
lam3a) / 3.]) scalers[0] = b112 elif cardneg == 1: - lam1b = lam1a+0.25*lam3a - lam2b = lam2a+0.25*lam3a + lam1b = lam1a + 0.25 * lam3a + lam2b = lam2a + 0.25 * lam3a if lam1b >= 0 and lam2b >= 0: scalers[:2] = lam1b, lam2b else: # one of the lam1b, lam2b is < 0 if lam2b < 0: - b111 = np.max([0, lam1a+(lam2a+lam3a)/3.]) + b111 = np.max([0, lam1a + (lam2a + lam3a) / 3.]) scalers[0] = b111 if lam1b < 0: - b221 = np.max([0, lam2a+(lam1a+lam3a)/3.]) + b221 = np.max([0, lam2a + (lam1a + lam3a) / 3.]) scalers[1] = b221 # resort the scalers to match the original vecs scalers = scalers[np.argsort(inds)] diff --git a/nibabel/nicom/structreader.py b/nibabel/nicom/structreader.py index 7f1fb7bd09..c40975b168 100644 --- a/nibabel/nicom/structreader.py +++ b/nibabel/nicom/structreader.py @@ -27,6 +27,7 @@ class Unpacker(object): >>> upk.ptr 7 ''' + def __init__(self, buf, ptr=0, endian=None): ''' Initialize unpacker diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index fc188d9201..144f600198 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -71,7 +71,7 @@ ('srow_z', 'f4', (4,)), # 312; 3rd row affine transform ('intent_name', 'S16'), # 328; name or meaning of data ('magic', 'S4') # 344; must be 'ni1\0' or 'n+1\0' - ] +] # Full header numpy dtype header_dtype = np.dtype(header_dtd) @@ -110,7 +110,7 @@ ('G', 'u1'), ('B', 'u1'), ('A', 'u1')]), "NIFTI_TYPE_RGBA32"), - ) +) # Make full code alias bank, including dtype column data_type_codes = make_dt_codes(_dtdefs) @@ -242,7 +242,7 @@ "NIFTI_INTENT_CONNECTIVITY_PARCELLATED_TIME"), (3005, 'trajectory connectivity', (), 'NIFTI_INTENT_CONNECTIVITY_CONNECTIVITY_TRAJECTORY'), - ), fields=('code', 'label', 'parameters', 'niistring')) +), fields=('code', 'label', 'parameters', 'niistring')) class Nifti1Extension(object): @@ -252,6 +252,7 @@ class Nifti1Extension(object): as `comment`. More sophisticated extensions should/will be supported by dedicated subclasses. """ + def __init__(self, code, content): """ Parameters @@ -392,13 +393,14 @@ def write_to(self, fileobj, byteswap): (12, "workflow_fwds", Nifti1Extension), (14, "freesurfer", Nifti1Extension), (16, "pypickle", Nifti1Extension) - ), +), fields=('code', 'label', 'handler')) class Nifti1Extensions(list): """Simple extension collection, implemented as a list-subclass. """ + def count(self, ecode): """Returns the number of extensions matching a given *ecode*. 
@@ -1139,9 +1141,9 @@ def get_dim_info(self): freq = info & 3 phase = (info >> 2) & 3 slice = (info >> 4) & 3 - return (freq-1 if freq else None, - phase-1 if phase else None, - slice-1 if slice else None) + return (freq - 1 if freq else None, + phase - 1 if phase else None, + slice - 1 if slice else None) def set_dim_info(self, freq=None, phase=None, slice=None): ''' Sets nifti MRI slice etc dimension information @@ -1182,11 +1184,11 @@ def set_dim_info(self, freq=None, phase=None, slice=None): raise HeaderDataError('Inputs must be in [None, 0, 1, 2]') info = 0 if freq is not None: - info = info | ((freq+1) & 3) + info = info | ((freq + 1) & 3) if phase is not None: - info = info | (((phase+1) & 3) << 2) + info = info | (((phase + 1) & 3) << 2) if slice is not None: - info = info | (((slice+1) & 3) << 4) + info = info | (((slice + 1) & 3) << 4) self._structarr['dim_info'] = info def get_intent(self, code_repr='label'): @@ -1226,7 +1228,7 @@ def get_intent(self, code_repr='label'): else: raise TypeError('repr can be "label" or "code"') n_params = len(recoder.parameters[code]) - params = (float(hdr['intent_p%d' % (i+1)]) for i in range(n_params)) + params = (float(hdr['intent_p%d' % (i + 1)]) for i in range(n_params)) name = asstr(np.asscalar(hdr['intent_name'])) return label, tuple(params), name @@ -1282,7 +1284,7 @@ def set_intent(self, code, params=(), name=''): all_params = [0] * 3 all_params[:len(params)] = params[:] for i, param in enumerate(all_params): - hdr['intent_p%d' % (i+1)] = param + hdr['intent_p%d' % (i + 1)] = param hdr['intent_code'] = icode hdr['intent_name'] = name @@ -1383,15 +1385,15 @@ def get_slice_times(self): if slice_start < 0: raise HeaderDataError('slice_start should be >= 0') if slice_end == 0: - slice_end = slice_len-1 + slice_end = slice_len - 1 n_timed = slice_end - slice_start + 1 if n_timed < 1: raise HeaderDataError('slice_end should be > slice_start') st_order = self._slice_time_order(slabel, n_timed) times = st_order * duration - return ((None,)*slice_start + + return ((None,) * slice_start + tuple(times) + - (None,)*(slice_len-slice_end-1)) + (None,) * (slice_len - slice_end - 1)) def set_slice_times(self, slice_times): ''' Set slice times into *hdr* @@ -1432,9 +1434,9 @@ def set_slice_times(self, slice_times): raise HeaderDataError('Not all slice times can be None') for ind, time in enumerate(slice_times[::-1]): if time is not None: - slice_end = slice_len-ind-1 + slice_end = slice_len - ind - 1 break - timed = slice_times[slice_start:slice_end+1] + timed = slice_times[slice_start:slice_end + 1] for time in timed: if time is None: raise HeaderDataError('Cannot have None in middle ' diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index b2f4be0054..93c8ebfe5e 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -117,7 +117,7 @@ ('intent_name', 'S16'), # 508; name or meaning of data ('dim_info', 'u1'), # 524; MRI slice ordering code ('unused_str', 'S15'), # 525; unused, filled with \0 - ] # total 540 +] # total 540 # Full header numpy dtype header_dtype = np.dtype(header_dtd) diff --git a/nibabel/onetime.py b/nibabel/onetime.py index 4fdaaecf7a..f5947e92fd 100644 --- a/nibabel/onetime.py +++ b/nibabel/onetime.py @@ -113,6 +113,7 @@ class OneTimeProperty(object): This is meant to be used mostly by the auto_attr decorator in this module. """ + def __init__(self, func): """Create a OneTimeProperty instance. 
diff --git a/nibabel/openers.py b/nibabel/openers.py index 78e9be326e..fafde811b1 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -33,7 +33,7 @@ class BufferedGzipFile(gzip.GzipFile): # It also helps limit the exposure to this code. if sys.version_info[:3] == (3, 5, 0): def __init__(self, fileish, mode='rb', compresslevel=9, - buffer_size=2**32-1): + buffer_size=2**32 - 1): super(BufferedGzipFile, self).__init__(fileish, mode=mode, compresslevel=compresslevel) self.buffer_size = buffer_size diff --git a/nibabel/orientations.py b/nibabel/orientations.py index 5721bbbde6..fb16eaa756 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -51,7 +51,7 @@ def io_orientation(affine, tol=None): which can happen when p > q, then this row should be considered dropped. ''' affine = np.asarray(affine) - q, p = affine.shape[0]-1, affine.shape[1]-1 + q, p = affine.shape[0] - 1, affine.shape[1] - 1 # extract the underlying rotation, zoom, shear matrix RZS = affine[:q, :p] zooms = np.sqrt(np.sum(RZS * RZS, axis=0)) diff --git a/nibabel/parrec.py b/nibabel/parrec.py index af3a5ede5f..7f73bfc580 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -115,15 +115,15 @@ # These come from looking at transverse, sagittal, coronal datasets where we # can see the LR, PA, SI orientation of the slice axes from the scanned object ACQ_TO_PSL = dict( - transverse=np.array([[0, 1, 0, 0], # P - [0, 0, 1, 0], # S - [1, 0, 0, 0], # L - [0, 0, 0, 1]]), + transverse=np.array([[0, 1, 0, 0], # P + [0, 0, 1, 0], # S + [1, 0, 0, 0], # L + [0, 0, 0, 1]]), sagittal=np.diag([1, -1, -1, 1]), - coronal=np.array([[0, 0, 1, 0], # P - [0, -1, 0, 0], # S - [1, 0, 0, 0], # L - [0, 0, 0, 1]]) + coronal=np.array([[0, 0, 1, 0], # P + [0, -1, 0, 0], # S + [1, 0, 0, 0], # L + [0, 0, 0, 1]]) ) # General information dict definitions @@ -167,7 +167,7 @@ 'Max. number of gradient orients': ('max_gradient_orient', int), # Line below added for par / rec version > 4.1 'Number of label types <0=no ASL>': ('nr_label_types', int), - } +} # Image information as coded into a numpy structured array # header items order per image definition line @@ -217,7 +217,7 @@ ('contrast type', 'S30'), # XXX might be too short? ('diffusion anisotropy type', 'S30'), # XXX might be too short? 
('diffusion', float, (3,)), - ] +] # Extra image def fields for 4.2 compared to 4.1 image_def_dtds['V4.2'] = image_def_dtds['V4.1'] + [ @@ -536,6 +536,7 @@ def exts2pars(exts_source): class PARRECArrayProxy(object): + @kw_only_meth(2) def __init__(self, file_like, header, mmap=True, scaling='dv'): """ Initialize PARREC array proxy @@ -617,6 +618,7 @@ def __getitem__(self, slicer): class PARRECHeader(SpatialHeader): """PAR/REC header""" + def __init__(self, info, image_defs, permit_truncated=False): """ Parameters diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index c9808cff67..7c8c193297 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -137,19 +137,19 @@ def quat2mat(q): True ''' w, x, y, z = q - Nq = w*w + x*x + y*y + z*z + Nq = w * w + x * x + y * y + z * z if Nq < FLOAT_EPS: return np.eye(3) - s = 2.0/Nq - X = x*s - Y = y*s - Z = z*s - wX, wY, wZ = w*X, w*Y, w*Z - xX, xY, xZ = x*X, x*Y, x*Z - yY, yZ, zZ = y*Y, y*Z, z*Z - return np.array([[1.0-(yY+zZ), xY-wZ, xZ+wY], - [xY+wZ, 1.0-(xX+zZ), yZ-wX], - [xZ-wY, yZ+wX, 1.0-(xX+yY)]]) + s = 2.0 / Nq + X = x * s + Y = y * s + Z = z * s + wX, wY, wZ = w * X, w * Y, w * Z + xX, xY, xZ = x * X, x * Y, x * Z + yY, yZ, zZ = y * Y, y * Z, z * Z + return np.array([[1.0 - (yY + zZ), xY - wZ, xZ + wY], + [xY + wZ, 1.0 - (xX + zZ), yZ - wX], + [xZ - wY, yZ + wX, 1.0 - (xX + yY)]]) def mat2quat(M): @@ -202,11 +202,11 @@ def mat2quat(M): Qxx, Qyx, Qzx, Qxy, Qyy, Qzy, Qxz, Qyz, Qzz = M.flat # Fill only lower half of symmetric matrix K = np.array([ - [Qxx - Qyy - Qzz, 0, 0, 0 ], - [Qyx + Qxy, Qyy - Qxx - Qzz, 0, 0 ], - [Qzx + Qxz, Qzy + Qyz, Qzz - Qxx - Qyy, 0 ], - [Qyz - Qzy, Qzx - Qxz, Qxy - Qyx, Qxx + Qyy + Qzz]] - ) / 3.0 + [Qxx - Qyy - Qzz, 0, 0, 0], + [Qyx + Qxy, Qyy - Qxx - Qzz, 0, 0], + [Qzx + Qxz, Qzy + Qyz, Qzz - Qxx - Qyy, 0], + [Qyz - Qzy, Qzx - Qxz, Qxy - Qyx, Qxx + Qyy + Qzz]] + ) / 3.0 # Use Hermitian eigenvectors, values for speed vals, vecs = np.linalg.eigh(K) # Select largest eigenvector, reorder to w,x,y,z quaternion @@ -236,10 +236,10 @@ def mult(q1, q2): ''' w1, x1, y1, z1 = q1 w2, x2, y2, z2 = q2 - w = w1*w2 - x1*x2 - y1*y2 - z1*z2 - x = w1*x2 + x1*w2 + y1*z2 - z1*y2 - y = w1*y2 + y1*w2 + z1*x2 - x1*z2 - z = w1*z2 + z1*w2 + x1*y2 - y1*x2 + w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2 + x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2 + y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2 + z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2 return np.array([w, x, y, z]) @@ -425,18 +425,18 @@ def angle_axis2mat(theta, vector, is_normalized=False): ''' x, y, z = vector if not is_normalized: - n = math.sqrt(x*x + y*y + z*z) - x = x/n - y = y/n - z = z/n + n = math.sqrt(x * x + y * y + z * z) + x = x / n + y = y / n + z = z / n c, s = math.cos(theta), math.sin(theta) C = 1 - c - xs, ys, zs = x*s, y*s, z*s - xC, yC, zC = x*C, y*C, z*C - xyC, yzC, zxC = x*yC, y*zC, z*xC - return np.array([[x*xC+c, xyC-zs, zxC+ys], - [xyC+zs, y*yC+c, yzC-xs], - [zxC-ys, yzC+xs, z*zC+c]]) + xs, ys, zs = x * s, y * s, z * s + xC, yC, zC = x * C, y * C, z * C + xyC, yzC, zxC = x * yC, y * zC, z * xC + return np.array([[x * xC + c, xyC - zs, zxC + ys], + [xyC + zs, y * yC + c, yzC - xs], + [zxC - ys, yzC + xs, z * zC + c]]) def quat2angle_axis(quat, identity_thresh=None): @@ -486,7 +486,7 @@ def quat2angle_axis(quat, identity_thresh=None): identity_thresh = np.finfo(vec.dtype).eps * 3 except ValueError: # integer type identity_thresh = FLOAT_EPS * 3 - n = math.sqrt(x*x + y*y + z*z) + n = math.sqrt(x * x + y * y + z * z) if n < identity_thresh: # if vec is nearly 
0,0,0, this is an identity rotation return 0.0, np.array([1.0, 0, 0]) diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 72bf7dbc58..c684b69e38 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -225,7 +225,7 @@ def set_data_shape(self, shape): self._shape = tuple([int(s) for s in shape]) # set any unset zooms to 1.0 nzs = min(len(self._zooms), ndim) - self._zooms = self._zooms[:nzs] + (1.0,) * (ndim-nzs) + self._zooms = self._zooms[:nzs] + (1.0,) * (ndim - nzs) def get_zooms(self): return self._zooms @@ -308,6 +308,7 @@ def supported_np_types(obj): class Header(SpatialHeader): '''Alias for SpatialHeader; kept for backwards compatibility.''' + def __init__(self, *args, **kwargs): warnings.warn('Header is deprecated, use SpatialHeader', DeprecationWarning, stacklevel=2) diff --git a/nibabel/spm2analyze.py b/nibabel/spm2analyze.py index 7ab93d4514..75e59019e9 100644 --- a/nibabel/spm2analyze.py +++ b/nibabel/spm2analyze.py @@ -14,7 +14,7 @@ image_dimension_dtd = spm99.image_dimension_dtd[:] image_dimension_dtd[ image_dimension_dtd.index(('funused2', 'f4')) - ] = ('scl_inter', 'f4') +] = ('scl_inter', 'f4') # Full header numpy dtype combined across sub-fields header_dtype = np.dtype(spm99.header_key_dtd + diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index fdf6c2d31a..bb92305d18 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -26,12 +26,12 @@ image_dimension_dtd = analyze.image_dimension_dtd[:] image_dimension_dtd[ image_dimension_dtd.index(('funused1', 'f4')) - ] = ('scl_slope', 'f4') +] = ('scl_slope', 'f4') # originator text field used as image origin (translations) data_history_dtd = analyze.data_history_dtd[:] data_history_dtd[ data_history_dtd.index(('originator', 'S10')) - ] = ('origin', 'i2', (5,)) +] = ('origin', 'i2', (5,)) # Full header numpy dtype combined across sub-fields header_dtype = np.dtype(header_key_dtd + @@ -148,10 +148,10 @@ def get_origin_affine(self): origin = hdr['origin'][:3] dims = hdr['dim'][1:4] if (np.any(origin) and - np.all(origin > -dims) and np.all(origin < dims*2)): - origin = origin-1 + np.all(origin > -dims) and np.all(origin < dims * 2)): + origin = origin - 1 else: - origin = (dims-1) / 2.0 + origin = (dims - 1) / 2.0 aff = np.eye(4) aff[:3, :3] = np.diag(zooms) aff[:3, -1] = -origin * zooms @@ -223,7 +223,7 @@ def _chk_origin(hdr, fix=False): origin = hdr['origin'][0:3] dims = hdr['dim'][1:4] if (not np.any(origin) or - (np.all(origin > -dims) and np.all(origin < dims*2))): + (np.all(origin > -dims) and np.all(origin < dims * 2))): return hdr, rep rep.problem_level = 20 rep.problem_msg = 'very large origin values relative to dims' diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index a03388311c..8c1b704260 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -31,6 +31,7 @@ class TemporaryDirectory(object): >>> os.path.exists(tmpdir) False """ + def __init__(self, suffix="", prefix=template, dir=None): self.name = mkdtemp(suffix, prefix, dir) self._closed = False @@ -64,6 +65,7 @@ class InTemporaryDirectory(TemporaryDirectory): >>> os.getcwd() == my_cwd True ''' + def __enter__(self): self._pwd = os.getcwd() os.chdir(self.name) @@ -97,6 +99,7 @@ class InGivenDirectory(object): fix, and finally replace ``InGivenDirectory`` with ``InTemporaryDirectory`` again. 
""" + def __init__(self, path=None): """ Initialize directory context manager diff --git a/nibabel/trackvis.py b/nibabel/trackvis.py index 561069b8b8..89e3d3f1b0 100644 --- a/nibabel/trackvis.py +++ b/nibabel/trackvis.py @@ -45,7 +45,7 @@ ('n_count', 'i4'), ('version', 'i4'), ('hdr_size', 'i4'), - ] +] # Version 2 adds a 4x4 matrix giving the affine transformation going # from voxel coordinates in the referenced 3D voxel matrix, to xyz @@ -75,7 +75,7 @@ ('n_count', 'i4'), ('version', 'i4'), ('hdr_size', 'i4'), - ] +] # Full header numpy dtypes header_1_dtype = np.dtype(header_1_dtd) @@ -250,7 +250,7 @@ def track_gen(): return streamlines, hdr -def write(fileobj, streamlines, hdr_mapping=None, endianness=None, +def write(fileobj, streamlines, hdr_mapping=None, endianness=None, points_space=None): ''' Write header and `streamlines` to trackvis file `fileobj` @@ -792,6 +792,7 @@ class TrackvisFile(object): space. If 'points_space' is not None, you can use this to give the relationship between voxels, rasmm and voxmm space (above). ''' + def __init__(self, streamlines, mapping=None, diff --git a/nibabel/tripwire.py b/nibabel/tripwire.py index e4967ee69e..3850281587 100644 --- a/nibabel/tripwire.py +++ b/nibabel/tripwire.py @@ -42,6 +42,7 @@ class TripWire(object): ... TripWireError: We do not have a_module """ + def __init__(self, msg): self._msg = msg diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index 3e96dde8c8..5fa08b78c5 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -76,6 +76,7 @@ class Recoder(object): >>> recodes[2] 2 ''' + def __init__(self, codes, fields=('code',), map_maker=dict): ''' Create recoder object @@ -234,6 +235,7 @@ class DtypeMapper(object): is a dtype, we compare (using ==) all known dtype keys to the input key, and return any matching values for the matching key. """ + def __init__(self): self._dict = {} self._dtype_keys = [] @@ -1121,7 +1123,7 @@ def scale_min_max(mn, mx, out_type, allow_intercept): [mn, mx, info['min'], info['max']], np.maximum_sctype(np.float)) # with intercept if allow_intercept: - data_range = mx-mn + data_range = mx - mn if data_range == 0: return 1.0, mn type_range = type_max - type_min @@ -1496,7 +1498,7 @@ def shape_zoom_affine(shape, zooms, x_flip=True): if x_flip: zooms[0] *= -1 # Get translations from center of image - origin = (shape-1) / 2.0 + origin = (shape - 1) / 2.0 aff = np.eye(4) aff[:3, :3] = np.diag(zooms) aff[:3, -1] = -origin * zooms @@ -1542,9 +1544,9 @@ class BinOpener(Opener): def __init__(self, *args, **kwargs): warnings.warn("Please use %s class instead of %s" % ( - Opener.__class__.__name__, - self.__class__.__name__), - DeprecationWarning, stacklevel=2) + Opener.__class__.__name__, + self.__class__.__name__), + DeprecationWarning, stacklevel=2) return super(BinOpener, self).__init__(*args, **kwargs) diff --git a/setup.py b/setup.py index db0b69866b..d5160b4a0c 100755 --- a/setup.py +++ b/setup.py @@ -16,14 +16,15 @@ # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly # update it when the contents of directories change. -if os.path.exists('MANIFEST'): os.remove('MANIFEST') +if os.path.exists('MANIFEST'): + os.remove('MANIFEST') # For some commands, use setuptools. if len(set(('develop', 'bdist_egg', 'bdist_rpm', 'bdist', 'bdist_dumb', 'install_egg_info', 'egg_info', 'easy_install', 'bdist_wheel', 'bdist_mpkg')).intersection(sys.argv)) > 0: # setup_egg imports setuptools setup, thus monkeypatching distutils. 
- import setup_egg + import setup_egg # noqa from distutils.core import setup @@ -42,7 +43,7 @@ tests_require=['nose'], test_suite='nose.collector', zip_safe=False, - extras_require = dict( + extras_require=dict( doc='Sphinx>=0.3', test='nose>=0.10.1'), ) From d8504c69161d79cfa5f12dcfa974a276e1e4b51e Mon Sep 17 00:00:00 2001 From: Ben Cipollini Date: Fri, 5 Feb 2016 11:04:16 -0800 Subject: [PATCH 02/11] STY: autopep8 on test code. --- nibabel/freesurfer/tests/test_mghformat.py | 2 +- nibabel/gifti/tests/test_gifti.py | 8 +- nibabel/gifti/tests/test_giftiio.py | 1 + nibabel/gifti/tests/test_parse_gifti_fast.py | 112 ++++---- nibabel/nicom/tests/data_pkgs.py | 12 +- nibabel/nicom/tests/test_csareader.py | 14 +- nibabel/nicom/tests/test_dicomreaders.py | 3 +- nibabel/nicom/tests/test_dicomwrappers.py | 44 +-- nibabel/nicom/tests/test_dwiparams.py | 12 +- nibabel/nicom/tests/test_utils.py | 4 +- nibabel/testing/__init__.py | 1 + nibabel/tests/data/check_parrec_reslice.py | 2 +- nibabel/tests/nibabel_data.py | 2 +- nibabel/tests/scriptrunner.py | 17 +- nibabel/tests/test_affines.py | 114 ++++---- nibabel/tests/test_analyze.py | 215 +++++++------- nibabel/tests/test_api_validators.py | 4 +- nibabel/tests/test_arrayproxy.py | 23 +- nibabel/tests/test_arraywriters.py | 104 +++---- nibabel/tests/test_batteryrunners.py | 25 +- nibabel/tests/test_casting.py | 54 ++-- nibabel/tests/test_data.py | 46 +-- nibabel/tests/test_deprecated.py | 4 + nibabel/tests/test_dft.py | 4 +- nibabel/tests/test_ecat.py | 82 +++--- nibabel/tests/test_ecat_data.py | 12 +- nibabel/tests/test_endiancodes.py | 1 + nibabel/tests/test_euler.py | 31 +- nibabel/tests/test_filehandles.py | 2 +- nibabel/tests/test_filename_parser.py | 18 +- nibabel/tests/test_files_interface.py | 8 +- nibabel/tests/test_fileslice.py | 75 ++--- nibabel/tests/test_floating.py | 84 +++--- nibabel/tests/test_funcs.py | 21 +- nibabel/tests/test_image_api.py | 30 +- nibabel/tests/test_image_load_save.py | 12 +- nibabel/tests/test_image_types.py | 2 +- nibabel/tests/test_keywordonly.py | 3 + nibabel/tests/test_loadsave.py | 2 +- nibabel/tests/test_minc1.py | 96 +++---- nibabel/tests/test_minc2.py | 78 ++--- nibabel/tests/test_minc2_data.py | 106 +++---- nibabel/tests/test_nifti1.py | 16 +- nibabel/tests/test_openers.py | 13 +- nibabel/tests/test_orientations.py | 282 ++++++++++--------- nibabel/tests/test_parrec.py | 177 ++++++------ nibabel/tests/test_parrec_data.py | 2 +- nibabel/tests/test_proxy_api.py | 35 ++- nibabel/tests/test_quaternions.py | 26 +- nibabel/tests/test_recoder.py | 30 +- nibabel/tests/test_round_trip.py | 12 +- nibabel/tests/test_rstutils.py | 31 +- nibabel/tests/test_scaling.py | 47 ++-- nibabel/tests/test_scripts.py | 6 +- nibabel/tests/test_spaces.py | 15 +- nibabel/tests/test_spatialimages.py | 120 ++++---- nibabel/tests/test_spm2analyze.py | 28 +- nibabel/tests/test_spm99analyze.py | 118 ++++---- nibabel/tests/test_testing.py | 6 +- nibabel/tests/test_tmpdirs.py | 1 + nibabel/tests/test_trackvis.py | 189 +++++++------ nibabel/tests/test_utils.py | 191 +++++++------ nibabel/tests/test_wrapstruct.py | 27 +- 63 files changed, 1484 insertions(+), 1378 deletions(-) diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 975aeb8b9c..e456d52af3 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -37,7 +37,7 @@ v2r = np.array([[1, 2, 3, -13], [2, 3, 1, -11.5], [3, 1, 2, -11.5], [0, 0, 0, 1]], dtype=np.float32) # sample voxel to 
ras - tkr matrix (mri_info --vox2ras-tkr) -v2rtkr = np.array([[-1.0, 0.0, 0.0, 1.5], +v2rtkr = np.array([[-1.0, 0.0, 0.0, 1.5], [0.0, 0.0, 1.0, -2.5], [0.0, -1.0, 0.0, 2.0], [0.0, 0.0, 0.0, 1.0]], dtype=np.float32) diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 73919d94e6..2cab52d167 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -24,7 +24,7 @@ def test_gifti_image(): # arguments. gi = GiftiImage() assert_equal(gi.darrays, []) - arr = np.zeros((2,3)) + arr = np.zeros((2, 3)) gi.darrays.append(arr) # Now check we didn't overwrite the default arg gi = GiftiImage() @@ -63,9 +63,9 @@ def test_gifti_image(): def test_dataarray(): for dt_code in data_type_codes.value_set(): data_type = data_type_codes.type[dt_code] - if data_type is np.void: # not supported + if data_type is np.void: # not supported continue - arr = np.zeros((10,3), dtype=data_type) + arr = np.zeros((10, 3), dtype=data_type) da = GiftiDataArray.from_array(arr, 'triangle') assert_equal(da.datatype, data_type_codes[arr.dtype]) bs_arr = arr.byteswap().newbyteorder() @@ -142,7 +142,7 @@ def assign_rgba(gl, val): def test_print_summary(): for fil in [DATA_FILE1, DATA_FILE2, DATA_FILE3, DATA_FILE4, - DATA_FILE5, DATA_FILE6]: + DATA_FILE5, DATA_FILE6]: gimg = nib.load(fil) gimg.print_summary() diff --git a/nibabel/gifti/tests/test_giftiio.py b/nibabel/gifti/tests/test_giftiio.py index 9c12b7c07a..90a87a2d09 100644 --- a/nibabel/gifti/tests/test_giftiio.py +++ b/nibabel/gifti/tests/test_giftiio.py @@ -19,6 +19,7 @@ class TestGiftiIO(object): + def setUp(self): with clear_and_catch_warnings() as w: warnings.simplefilter('always', DeprecationWarning) diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index e9511b649b..163e050734 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -45,56 +45,56 @@ numDA = [2, 1, 1, 1, 2, 1] DATA_FILE1_darr1 = np.array( - [[-16.07201 , -66.187515, 21.266994], - [-16.705893, -66.054337, 21.232786], - [-17.614349, -65.401642, 21.071466]]) -DATA_FILE1_darr2 = np.array( [0,1,2] ) - -DATA_FILE2_darr1 = np.array([[ 0.43635699], - [ 0.270017 ], - [ 0.133239 ], - [ 0.35054299], - [ 0.26538199], - [ 0.32122701], - [ 0.23495001], - [ 0.26671499], - [ 0.306851 ], - [ 0.36302799]], dtype=np.float32) + [[-16.07201, -66.187515, 21.266994], + [-16.705893, -66.054337, 21.232786], + [-17.614349, -65.401642, 21.071466]]) +DATA_FILE1_darr2 = np.array([0, 1, 2]) + +DATA_FILE2_darr1 = np.array([[0.43635699], + [0.270017], + [0.133239], + [0.35054299], + [0.26538199], + [0.32122701], + [0.23495001], + [0.26671499], + [0.306851], + [0.36302799]], dtype=np.float32) DATA_FILE3_darr1 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]) DATA_FILE4_darr1 = np.array([[-0.57811606], - [-0.53871965], - [-0.44602534], - [-0.56532663], - [-0.51392376], - [-0.43225467], - [-0.54646534], - [-0.48011276], - [-0.45624232], - [-0.31101292]], dtype=np.float32) - -DATA_FILE5_darr1 = np.array([[ 155.17539978, 135.58103943, 98.30715179], - [ 140.33973694, 190.0491333 , 73.24776459], - [ 157.3598938 , 196.97969055, 83.65809631], - [ 171.46174622, 137.43661499, 78.4709549 ], - [ 148.54592896, 97.06752777, 65.96373749], - [ 123.45701599, 111.46841431, 66.3571167 ], - [ 135.30892944, 202.28720093, 36.38148499], - [ 178.28155518, 162.59469604, 37.75128937], - [ 178.11087036, 115.28820038, 57.17986679], - [ 142.81582642, 82.82115173, 
31.02205276]], dtype=np.float32) - -DATA_FILE5_darr2 = np.array([[ 6402, 17923, 25602], - [14085, 25602, 17923], - [25602, 14085, 4483], - [17923, 1602, 14085], - [ 4483, 25603, 25602], - [25604, 25602, 25603], - [25602, 25604, 6402], - [25603, 3525, 25604], - [ 1123, 17922, 12168], - [25604, 12168, 17922]], dtype=np.int32) + [-0.53871965], + [-0.44602534], + [-0.56532663], + [-0.51392376], + [-0.43225467], + [-0.54646534], + [-0.48011276], + [-0.45624232], + [-0.31101292]], dtype=np.float32) + +DATA_FILE5_darr1 = np.array([[155.17539978, 135.58103943, 98.30715179], + [140.33973694, 190.0491333, 73.24776459], + [157.3598938, 196.97969055, 83.65809631], + [171.46174622, 137.43661499, 78.4709549], + [148.54592896, 97.06752777, 65.96373749], + [123.45701599, 111.46841431, 66.3571167], + [135.30892944, 202.28720093, 36.38148499], + [178.28155518, 162.59469604, 37.75128937], + [178.11087036, 115.28820038, 57.17986679], + [142.81582642, 82.82115173, 31.02205276]], dtype=np.float32) + +DATA_FILE5_darr2 = np.array([[6402, 17923, 25602], + [14085, 25602, 17923], + [25602, 14085, 4483], + [17923, 1602, 14085], + [4483, 25603, 25602], + [25604, 25602, 25603], + [25602, 25604, 6402], + [25603, 3525, 25604], + [1123, 17922, 12168], + [25604, 12168, 17922]], dtype=np.int32) DATA_FILE6_darr1 = np.array([9182740, 9182740, 9182740], dtype=np.float32) @@ -107,7 +107,7 @@ def test_read_ordering(): assert_equal(img2.darrays[0].data.shape, (143479, 1)) # Read image for which we know output shape img = load(DATA_FILE1) - assert_equal(img.darrays[0].data.shape, (3,3)) + assert_equal(img.darrays[0].data.shape, (3, 3)) def test_load_metadata(): @@ -115,7 +115,7 @@ def test_load_metadata(): img = load(dat) me = img.meta assert_equal(numDA[i], img.numDA) - assert_equal(img.version,'1.0') + assert_equal(img.version, '1.0') def test_metadata_deprecations(): @@ -142,13 +142,15 @@ def test_load_dataarray1(): for img in (img1, bimg): assert_array_almost_equal(img.darrays[0].data, DATA_FILE1_darr1) assert_array_almost_equal(img.darrays[1].data, DATA_FILE1_darr2) - me=img.darrays[0].meta.metadata + me = img.darrays[0].meta.metadata assert_true('AnatomicalStructurePrimary' in me) assert_true('AnatomicalStructureSecondary' in me) assert_equal(me['AnatomicalStructurePrimary'], 'CortexLeft') - assert_array_almost_equal(img.darrays[0].coordsys.xform, np.eye(4,4)) - assert_equal(xform_codes.niistring[img.darrays[0].coordsys.dataspace],'NIFTI_XFORM_TALAIRACH') - assert_equal(xform_codes.niistring[img.darrays[0].coordsys.xformspace],'NIFTI_XFORM_TALAIRACH') + assert_array_almost_equal(img.darrays[0].coordsys.xform, np.eye(4, 4)) + assert_equal(xform_codes.niistring[img.darrays[ + 0].coordsys.dataspace], 'NIFTI_XFORM_TALAIRACH') + assert_equal(xform_codes.niistring[img.darrays[ + 0].coordsys.xformspace], 'NIFTI_XFORM_TALAIRACH') def test_load_dataarray2(): @@ -223,19 +225,19 @@ def test_readwritedata(): with InTemporaryDirectory(): save(img, 'test.gii') img2 = load('test.gii') - assert_equal(img.numDA,img2.numDA) + assert_equal(img.numDA, img2.numDA) assert_array_almost_equal(img.darrays[0].data, img2.darrays[0].data) def test_write_newmetadata(): img = gi.GiftiImage() - attr = gi.GiftiNVPairs(name = 'mykey', value = 'val1') + attr = gi.GiftiNVPairs(name='mykey', value='val1') newmeta = gi.GiftiMetaData(attr) img.meta = newmeta myme = img.meta.metadata assert_true('mykey' in myme) - newmeta = gi.GiftiMetaData.from_dict( {'mykey1' : 'val2'} ) + newmeta = gi.GiftiMetaData.from_dict({'mykey1': 'val2'}) img.meta = newmeta myme = 
img.meta.metadata assert_true('mykey1' in myme) diff --git a/nibabel/nicom/tests/data_pkgs.py b/nibabel/nicom/tests/data_pkgs.py index bd2babb3a8..56c135fd5b 100644 --- a/nibabel/nicom/tests/data_pkgs.py +++ b/nibabel/nicom/tests/data_pkgs.py @@ -3,14 +3,14 @@ from ... import data as nibd PUBLIC_PKG_DEF = dict( - relpath = 'nipy/dicom/public', - name = 'nipy-dicom-public', - version = '0.1') + relpath='nipy/dicom/public', + name='nipy-dicom-public', + version='0.1') PRIVATE_PKG_DEF = dict( - relpath = 'nipy/dicom/private', - name = 'nipy-dicom-private', - version = '0.1') + relpath='nipy/dicom/private', + name='nipy-dicom-private', + version='0.1') PUBLIC_DS = nibd.datasource_or_bomber(PUBLIC_PKG_DEF) diff --git a/nibabel/nicom/tests/test_csareader.py b/nibabel/nicom/tests/test_csareader.py index 4145de1c7f..33feb7eaee 100644 --- a/nibabel/nicom/tests/test_csareader.py +++ b/nibabel/nicom/tests/test_csareader.py @@ -25,9 +25,9 @@ @dicom_test def test_csa_header_read(): hdr = csa.get_csa_header(DATA, 'image') - assert_equal(hdr['n_tags'],83) - assert_equal(csa.get_csa_header(DATA,'series')['n_tags'],65) - assert_raises(ValueError, csa.get_csa_header, DATA,'xxxx') + assert_equal(hdr['n_tags'], 83) + assert_equal(csa.get_csa_header(DATA, 'series')['n_tags'], 65) + assert_raises(ValueError, csa.get_csa_header, DATA, 'xxxx') assert_true(csa.is_mosaic(hdr)) # Get a shallow copy of the data, lacking the CSA marker # Need to do it this way because del appears broken in pydicom 0.9.7 @@ -94,7 +94,7 @@ def test_csa_params(): snv = csa.get_slice_normal(csa_info) assert_equal(snv.shape, (3,)) assert_true(np.allclose(1, - np.sqrt((snv * snv).sum()))) + np.sqrt((snv * snv).sum()))) amt = csa.get_acq_mat_txt(csa_info) assert_equal(amt, '128p*128') csa_info = csa.read(CSA2_B0) @@ -106,7 +106,7 @@ def test_csa_params(): assert_equal(g_vector, None) csa_info = csa.read(CSA2_B1000) b_matrix = csa.get_b_matrix(csa_info) - assert_equal(b_matrix.shape, (3,3)) + assert_equal(b_matrix.shape, (3, 3)) # check (by absence of error) that the B matrix is positive # semi-definite. 
q = dwp.B2q(b_matrix)
@@ -122,10 +122,10 @@ def test_ice_dims():
     ex_dims0 = ['X', '1', '1', '1', '1', '1', '1',
                 '48', '1', '1', '1', '1', '201']
     ex_dims1 = ['X', '1', '1', '1', '2', '1', '1',
-              '48', '1', '1', '1', '1', '201']
+                '48', '1', '1', '1', '1', '201']
     for csa_str, ex_dims in ((CSA2_B0, ex_dims0),
                              (CSA2_B1000, ex_dims1)):
         csa_info = csa.read(csa_str)
         assert_equal(csa.get_ice_dims(csa_info),
-                    ex_dims)
+                     ex_dims)
     assert_equal(csa.get_ice_dims({}), None)
diff --git a/nibabel/nicom/tests/test_dicomreaders.py b/nibabel/nicom/tests/test_dicomreaders.py
index a4588d486f..07958bd630 100644
--- a/nibabel/nicom/tests/test_dicomreaders.py
+++ b/nibabel/nicom/tests/test_dicomreaders.py
@@ -16,11 +16,12 @@
 from numpy.testing import assert_array_equal, assert_array_almost_equal
 
+
 @dicom_test
 def test_read_dwi():
     img = didr.mosaic_to_nii(DATA)
     arr = img.get_data()
-    assert_equal(arr.shape, (128,128,48))
+    assert_equal(arr.shape, (128, 128, 48))
     assert_array_almost_equal(img.affine, EXPECTED_AFFINE)
 
diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py
index 660b87f1b3..4832ff6729 100644
--- a/nibabel/nicom/tests/test_dicomwrappers.py
+++ b/nibabel/nicom/tests/test_dicomwrappers.py
@@ -63,6 +63,7 @@
                          0.99997450, -0.005023611)]
 
+
 @dicom_test
 def test_wrappers():
     # test direct wrapper calls
@@ -124,6 +125,7 @@ class FakeData(dict):
     # Check get defers to dcm_data get
     class FakeData2(object):
+
         def get(self, key, default):
             return 1
     d = FakeData2()
@@ -194,9 +196,9 @@ def test_wrapper_args_kwds():
 def test_dwi_params():
     dw = didw.wrapper_from_data(DATA)
     b_matrix = dw.b_matrix
-    assert_equal(b_matrix.shape, (3,3))
+    assert_equal(b_matrix.shape, (3, 3))
     q = dw.q_vector
-    b = np.sqrt(np.sum(q * q)) # vector norm
+    b = np.sqrt(np.sum(q * q))  # vector norm
     g = q / b
     assert_array_almost_equal(b, EXPECTED_PARAMS[0])
     assert_array_almost_equal(g, EXPECTED_PARAMS[1])
@@ -335,19 +337,19 @@ def test_rotation_matrix():
 
 @dicom_test
 def test_use_csa_sign():
-    #Test that we get the same slice normal, even after swapping the iop
-    #directions
+    # Test that we get the same slice normal, even after swapping the iop
+    # directions
     dw = didw.wrapper_from_file(DATA_FILE_SLC_NORM)
     iop = dw.image_orient_patient
-    dw.image_orient_patient = np.c_[iop[:,1], iop[:,0]]
+    dw.image_orient_patient = np.c_[iop[:, 1], iop[:, 0]]
     dw2 = didw.wrapper_from_file(DATA_FILE_SLC_NORM)
     assert_true(np.allclose(dw.slice_normal, dw2.slice_normal))
 
 
 @dicom_test
 def test_assert_parallel():
-    #Test that we get an AssertionError if the cross product and the CSA
-    #slice normal are not parallel
+    # Test that we get an AssertionError if the cross product and the CSA
+    # slice normal are not parallel
     dw = didw.wrapper_from_file(DATA_FILE_SLC_NORM)
     dw.image_orient_patient = np.c_[[1., 0., 0.], [0., 1., 0.]]
     assert_raises(AssertionError, dw.__getattribute__, 'slice_normal')
@@ -355,8 +357,8 @@
 
 @dicom_test
 def test_decimal_rescale():
-    #Test that we don't get back a data array with dtype np.object when our
-    #rescale slope is a decimal
+    # Test that we don't get back a data array with dtype np.object when our
+    # rescale slope is a decimal
     dw = didw.wrapper_from_file(DATA_FILE_DEC_RSCL)
     assert_not_equal(dw.get_data().dtype, np.object)
 
@@ -379,7 +381,8 @@ def fake_frames(seq_name, field_name, value_seq):
         each element in list is obj.<seq_name>[0].<field_name>
= value_seq[n] for n in range(N) """ - class Fake(object): pass + class Fake(object): + pass frames = [] for value in value_seq: fake_frame = Fake() @@ -394,8 +397,8 @@ class TestMultiFrameWrapper(TestCase): # Test MultiframeWrapper MINIMAL_MF = { # Minimal contents of dcm_data for this wrapper - 'PerFrameFunctionalGroupsSequence': [None], - 'SharedFunctionalGroupsSequence': [None]} + 'PerFrameFunctionalGroupsSequence': [None], + 'SharedFunctionalGroupsSequence': [None]} WRAPCLASS = didw.MultiframeWrapper def test_shape(self): @@ -417,6 +420,7 @@ def test_shape(self): # PerFrameFunctionalGroupsSequence does not match NumberOfFrames assert_raises(AssertionError, getattr, dw, 'image_shape') # Make some fake frame data for 3D + def my_fake_frames(div_seq): return fake_frames('FrameContentSequence', 'DimensionIndexValues', @@ -433,13 +437,13 @@ def my_fake_frames(div_seq): # Make some fake frame data for 4D fake_mf['NumberOfFrames'] = 6 div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), - (1, 1, 3), (1, 2, 3)) + (1, 1, 3), (1, 2, 3)) frames = my_fake_frames(div_seq) fake_mf['PerFrameFunctionalGroupsSequence'] = frames assert_equal(MFW(fake_mf).image_shape, (32, 64, 2, 3)) # Check stack number matching for 4D div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), - (1, 1, 3), (2, 2, 3)) + (1, 1, 3), (2, 2, 3)) frames = my_fake_frames(div_seq) fake_mf['PerFrameFunctionalGroupsSequence'] = frames assert_raises(didw.WrapperError, getattr, MFW(fake_mf), 'image_shape') @@ -468,13 +472,13 @@ def test_iop(self): [[0, 1, 0, 1, 0, 0]])[0] fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] assert_array_equal(MFW(fake_mf).image_orient_patient, - [[0, 1], [1, 0], [0, 0]]) + [[0, 1], [1, 0], [0, 0]]) fake_mf['SharedFunctionalGroupsSequence'] = [None] assert_raises(didw.WrapperError, getattr, MFW(fake_mf), 'image_orient_patient') fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] assert_array_equal(MFW(fake_mf).image_orient_patient, - [[0, 1], [1, 0], [0, 0]]) + [[0, 1], [1, 0], [0, 0]]) def test_voxel_sizes(self): # Test voxel size calculation @@ -528,7 +532,7 @@ def test_image_position(self): assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) fake_mf['SharedFunctionalGroupsSequence'] = [None] assert_raises(didw.WrapperError, - getattr, MFW(fake_mf), 'image_position') + getattr, MFW(fake_mf), 'image_position') fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) # Check lists of Decimals work @@ -555,7 +559,7 @@ def test_data_real(self): data = data.byteswap() dat_str = data.tostring() assert_equal(sha1(dat_str).hexdigest(), - '149323269b0af92baa7508e19ca315240f77fa8c') + '149323269b0af92baa7508e19ca315240f77fa8c') def test_data_fake(self): # Test algorithm for get_data @@ -624,8 +628,8 @@ def test_data_fake(self): shape = (2, 3, 4, 2, 2) data = np.arange(np.prod(shape)).reshape(shape) sorted_data = data.reshape(shape[:2] + (-1,), order='F') - order = [11, 9, 10, 8, 3, 1, 2, 0, - 15, 13, 14, 12, 7, 5, 6, 4] + order = [11, 9, 10, 8, 3, 1, 2, 0, + 15, 13, 14, 12, 7, 5, 6, 4] sorted_data = sorted_data[..., np.argsort(order)] fake_mf['pixel_array'] = np.rollaxis(sorted_data, 2) assert_array_equal(MFW(fake_mf).get_data(), data * 2.0 - 1) diff --git a/nibabel/nicom/tests/test_dwiparams.py b/nibabel/nicom/tests/test_dwiparams.py index 89355cf81d..3b02367951 100644 --- a/nibabel/nicom/tests/test_dwiparams.py +++ b/nibabel/nicom/tests/test_dwiparams.py @@ -14,17 +14,17 @@ def test_b2q(): # conversion of b matrix to q - q = 
np.array([1,2,3])
-    s = np.sqrt(np.sum(q * q)) # vector norm
+    q = np.array([1, 2, 3])
+    s = np.sqrt(np.sum(q * q))  # vector norm
     B = np.outer(q, q)
-    assert_array_almost_equal(q*s, B2q(B))
-    q = np.array([1,2,3])
+    assert_array_almost_equal(q * s, B2q(B))
+    q = np.array([1, 2, 3])
     # check that the sign of the vector follows the positive x convention
     B = np.outer(-q, -q)
-    assert_array_almost_equal(q*s, B2q(B))
+    assert_array_almost_equal(q * s, B2q(B))
     q = np.array([-1, 2, 3])
     B = np.outer(q, q)
-    assert_array_almost_equal(-q*s, B2q(B))
+    assert_array_almost_equal(-q * s, B2q(B))
     # Massive negative eigs
     B = np.eye(3) * -1
     assert_raises(ValueError, B2q, B)
diff --git a/nibabel/nicom/tests/test_utils.py b/nibabel/nicom/tests/test_utils.py
index f984fb4a70..69a77617ef 100644
--- a/nibabel/nicom/tests/test_utils.py
+++ b/nibabel/nicom/tests/test_utils.py
@@ -57,12 +57,12 @@ def test_find_private_section_real():
     assert_equal(find_private_section(ds, 0x11,
                                       re.compile(r'third\Wsectio[nN]')),
-                0x1200)
+                 0x1200)
     # No match -> None
     assert_equal(find_private_section(ds, 0x11,
                                       re.compile(r'not third\Wsectio[nN]')),
-                None)
+                 None)
     # If there are gaps in the sequence before the one we want, that is OK
     ds.add_new((0x11, 0x13), 'LO', b'near section')
     assert_equal(find_private_section(ds, 0x11, 'near section'), 0x1300)
diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py
index 3918526c6e..21c8713dff 100644
--- a/nibabel/testing/__init__.py
+++ b/nibabel/testing/__init__.py
@@ -162,6 +162,7 @@ class suppress_warnings(error_warnings):
 
 class catch_warn_reset(clear_and_catch_warnings):
+
     def __init__(self, *args, **kwargs):
         warnings.warn('catch_warn_reset is deprecated and will be removed in '
                       'nibabel v3.0; use nibabel.testing.clear_and_catch_warnings.',
diff --git a/nibabel/tests/data/check_parrec_reslice.py b/nibabel/tests/data/check_parrec_reslice.py
index 1ff24d7f71..cc2a5942b5 100644
--- a/nibabel/tests/data/check_parrec_reslice.py
+++ b/nibabel/tests/data/check_parrec_reslice.py
@@ -43,7 +43,7 @@ def resample_img2img(img_to, img_from, order=1, out_class=nib.Nifti1Image):
                              rzs,
                              trans,
                              img_to.shape,
-                             order = order)
+                             order=order)
     return out_class(data, img_to.affine)
 
diff --git a/nibabel/tests/nibabel_data.py b/nibabel/tests/nibabel_data.py
index f434c7b026..2ff473fd1d 100644
--- a/nibabel/tests/nibabel_data.py
+++ b/nibabel/tests/nibabel_data.py
@@ -23,7 +23,7 @@ def get_nibabel_data():
     return nibabel_data if isdir(nibabel_data) else ''
 
 
-def needs_nibabel_data(subdir = None):
+def needs_nibabel_data(subdir=None):
     """ Decorator for tests needing nibabel-data
 
     Parameters
diff --git a/nibabel/tests/scriptrunner.py b/nibabel/tests/scriptrunner.py
index 35c450ed0e..a82fdaa1e8 100644
--- a/nibabel/tests/scriptrunner.py
+++ b/nibabel/tests/scriptrunner.py
@@ -18,9 +18,9 @@
 from subprocess import Popen, PIPE
 
-try: # Python 2
+try:  # Python 2
     string_types = basestring,
-except NameError: # Python 3
+except NameError:  # Python 3
     string_types = str,
 
 
@@ -33,7 +33,7 @@ def _get_package():
 # Same as __package__ for Python 2.6, 2.7 and >= 3.3
-MY_PACKAGE=_get_package()
+MY_PACKAGE = _get_package()
 
 
 def local_script_dir(script_sdir):
@@ -66,12 +66,13 @@ class ScriptRunner(object):
     Finds local scripts and local modules if running in the development
     directory, otherwise finds system scripts and modules.
""" + def __init__(self, - script_sdir = 'scripts', - module_sdir = MY_PACKAGE, - debug_print_var = None, - output_processor = lambda x : x - ): + script_sdir='scripts', + module_sdir=MY_PACKAGE, + debug_print_var=None, + output_processor=lambda x: x + ): """ Init ScriptRunner instance Parameters diff --git a/nibabel/tests/test_affines.py b/nibabel/tests/test_affines.py index 18a8891526..c950dfefac 100644 --- a/nibabel/tests/test_affines.py +++ b/nibabel/tests/test_affines.py @@ -9,7 +9,7 @@ from nose.tools import assert_equal, assert_raises from numpy.testing import assert_array_equal, assert_almost_equal, \ - assert_array_almost_equal + assert_array_almost_equal def validated_apply_affine(T, xyz): @@ -17,71 +17,71 @@ def validated_apply_affine(T, xyz): # to test against xyz = np.asarray(xyz) shape = xyz.shape[0:-1] - XYZ = np.dot(np.reshape(xyz, (np.prod(shape), 3)), T[0:3,0:3].T) - XYZ[:,0] += T[0,3] - XYZ[:,1] += T[1,3] - XYZ[:,2] += T[2,3] - XYZ = np.reshape(XYZ, shape+(3,)) + XYZ = np.dot(np.reshape(xyz, (np.prod(shape), 3)), T[0:3, 0:3].T) + XYZ[:, 0] += T[0, 3] + XYZ[:, 1] += T[1, 3] + XYZ[:, 2] += T[2, 3] + XYZ = np.reshape(XYZ, shape + (3,)) return XYZ def test_apply_affine(): rng = np.random.RandomState(20110903) aff = np.diag([2, 3, 4, 1]) - pts = rng.uniform(size=(4,3)) + pts = rng.uniform(size=(4, 3)) assert_array_equal(apply_affine(aff, pts), pts * [[2, 3, 4]]) - aff[:3,3] = [10, 11, 12] + aff[:3, 3] = [10, 11, 12] assert_array_equal(apply_affine(aff, pts), pts * [[2, 3, 4]] + [[10, 11, 12]]) - aff[:3,:] = rng.normal(size=(3,4)) - exp_res = np.concatenate((pts.T, np.ones((1,4))), axis=0) - exp_res = np.dot(aff, exp_res)[:3,:].T + aff[:3, :] = rng.normal(size=(3, 4)) + exp_res = np.concatenate((pts.T, np.ones((1, 4))), axis=0) + exp_res = np.dot(aff, exp_res)[:3, :].T assert_array_equal(apply_affine(aff, pts), exp_res) # Check we get the same result as the previous implementation assert_almost_equal(validated_apply_affine(aff, pts), apply_affine(aff, pts)) # Check that lists work for inputs assert_array_equal(apply_affine(aff.tolist(), pts.tolist()), exp_res) # Check that it's the same as a banal implementation in the simple case - aff = np.array([[0,2,0,10],[3,0,0,11],[0,0,4,12],[0,0,0,1]]) - pts = np.array([[1,2,3],[2,3,4],[4,5,6],[6,7,8]]) - exp_res = (np.dot(aff[:3,:3], pts.T) + aff[:3,3:4]).T + aff = np.array([[0, 2, 0, 10], [3, 0, 0, 11], [0, 0, 4, 12], [0, 0, 0, 1]]) + pts = np.array([[1, 2, 3], [2, 3, 4], [4, 5, 6], [6, 7, 8]]) + exp_res = (np.dot(aff[:3, :3], pts.T) + aff[:3, 3:4]).T assert_array_equal(apply_affine(aff, pts), exp_res) # That points can be reshaped and you'll get the same shape output - pts = pts.reshape((2,2,3)) - exp_res = exp_res.reshape((2,2,3)) + pts = pts.reshape((2, 2, 3)) + exp_res = exp_res.reshape((2, 2, 3)) assert_array_equal(apply_affine(aff, pts), exp_res) # That ND also works - for N in range(2,6): + for N in range(2, 6): aff = np.eye(N) - nd = N-1 - aff[:nd,:nd] = rng.normal(size=(nd,nd)) - pts = rng.normal(size=(2,3,nd)) + nd = N - 1 + aff[:nd, :nd] = rng.normal(size=(nd, nd)) + pts = rng.normal(size=(2, 3, nd)) res = apply_affine(aff, pts) # crude apply - new_pts = np.ones((N,6)) - new_pts[:-1,:] = np.rollaxis(pts, -1).reshape((nd,6)) + new_pts = np.ones((N, 6)) + new_pts[:-1, :] = np.rollaxis(pts, -1).reshape((nd, 6)) exp_pts = np.dot(aff, new_pts) - exp_pts = np.rollaxis(exp_pts[:-1,:], 0, 2) - exp_res = exp_pts.reshape((2,3,nd)) + exp_pts = np.rollaxis(exp_pts[:-1, :], 0, 2) + exp_res = exp_pts.reshape((2, 3, nd)) 
assert_array_almost_equal(res, exp_res) def test_matrix_vector(): - for M, N in ((4,4), (5,4), (4, 5)): + for M, N in ((4, 4), (5, 4), (4, 5)): xform = np.zeros((M, N)) - xform[:-1,:] = np.random.normal(size=(M-1, N)) - xform[-1,-1] = 1 + xform[:-1, :] = np.random.normal(size=(M - 1, N)) + xform[-1, -1] = 1 newmat, newvec = to_matvec(xform) mat = xform[:-1, :-1] vec = xform[:-1, -1] assert_array_equal(newmat, mat) assert_array_equal(newvec, vec) - assert_equal(newvec.shape, (M-1,)) + assert_equal(newvec.shape, (M - 1,)) assert_array_equal(from_matvec(mat, vec), xform) # Check default translation works xform_not = xform[:] - xform_not[:-1,:] = 0 + xform_not[:-1, :] = 0 assert_array_equal(from_matvec(mat), xform) assert_array_equal(from_matvec(mat, None), xform) # Check array-like works @@ -93,37 +93,37 @@ def test_matrix_vector(): def test_append_diag(): # Routine for appending diagonal elements - assert_array_equal(append_diag(np.diag([2,3,1]), [1]), - np.diag([2,3,1,1])) - assert_array_equal(append_diag(np.diag([2,3,1]), [1,1]), - np.diag([2,3,1,1,1])) - aff = np.array([[2,0,0], - [0,3,0], - [0,0,1], - [0,0,1]]) + assert_array_equal(append_diag(np.diag([2, 3, 1]), [1]), + np.diag([2, 3, 1, 1])) + assert_array_equal(append_diag(np.diag([2, 3, 1]), [1, 1]), + np.diag([2, 3, 1, 1, 1])) + aff = np.array([[2, 0, 0], + [0, 3, 0], + [0, 0, 1], + [0, 0, 1]]) assert_array_equal(append_diag(aff, [5], [9]), - [[2,0,0,0], - [0,3,0,0], - [0,0,0,1], - [0,0,5,9], - [0,0,0,1]]) - assert_array_equal(append_diag(aff, [5,6], [9,10]), - [[2,0,0,0,0], - [0,3,0,0,0], - [0,0,0,0,1], - [0,0,5,0,9], - [0,0,0,6,10], - [0,0,0,0,1]]) - aff = np.array([[2,0,0,0], - [0,3,0,0], - [0,0,0,1]]) + [[2, 0, 0, 0], + [0, 3, 0, 0], + [0, 0, 0, 1], + [0, 0, 5, 9], + [0, 0, 0, 1]]) + assert_array_equal(append_diag(aff, [5, 6], [9, 10]), + [[2, 0, 0, 0, 0], + [0, 3, 0, 0, 0], + [0, 0, 0, 0, 1], + [0, 0, 5, 0, 9], + [0, 0, 0, 6, 10], + [0, 0, 0, 0, 1]]) + aff = np.array([[2, 0, 0, 0], + [0, 3, 0, 0], + [0, 0, 0, 1]]) assert_array_equal(append_diag(aff, [5], [9]), - [[2,0,0,0,0], - [0,3,0,0,0], - [0,0,0,5,9], - [0,0,0,0,1]]) + [[2, 0, 0, 0, 0], + [0, 3, 0, 0, 0], + [0, 0, 0, 5, 9], + [0, 0, 0, 0, 1]]) # Length of starts has to match length of steps - assert_raises(ValueError, append_diag, aff, [5,6], [9]) + assert_raises(ValueError, append_diag, aff, [5, 6], [9]) def test_dot_reduce(): diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index 4a0a4180ab..17e7b07719 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -91,7 +91,7 @@ def test_general_init(self): # translations though - these arise from SPM's use of the origin # field, and the center of the image. 
assert_array_equal(np.diag(hdr.get_base_affine()), - [-1,1,1,1]) + [-1, 1, 1, 1]) # But zooms only go with number of dimensions assert_equal(hdr.get_zooms(), (1.0,)) @@ -103,9 +103,9 @@ def test_empty(self): assert_true(len(hdr.binaryblock) == self.sizeof_hdr) assert_true(hdr['sizeof_hdr'] == self.sizeof_hdr) assert_true(np.all(hdr['dim'][1:] == 1)) - assert_true(hdr['dim'][0] == 0 ) + assert_true(hdr['dim'][0] == 0) assert_true(np.all(hdr['pixdim'] == 1)) - assert_true(hdr['datatype'] == 16) # float32 + assert_true(hdr['datatype'] == 16) # float32 assert_true(hdr['bitpix'] == 32) def _set_something_into_hdr(self, hdr): @@ -130,7 +130,7 @@ def test_checks(self): hdr = hdr_t.copy() hdr['bitpix'] = 0 assert_equal(self._dxer(hdr), 'bitpix does not match datatype') - for i in (1,2,3): + for i in (1, 2, 3): hdr = hdr_t.copy() hdr['pixdim'][i] = -1 assert_equal(self._dxer(hdr), 'pixdim[1,2,3] should be positive') @@ -146,7 +146,7 @@ def test_log_checks(self): assert_equal(fhdr['sizeof_hdr'], self.sizeof_hdr) assert_equal(message, 'sizeof_hdr should be {0}; set sizeof_hdr to {0}'.format( - self.sizeof_hdr)) + self.sizeof_hdr)) assert_raises(*raiser) # RGB datatype does not raise error hdr = HC() @@ -154,53 +154,53 @@ def test_log_checks(self): fhdr, message, raiser = self.log_chk(hdr, 0) # datatype not recognized hdr = HC() - hdr['datatype'] = -1 # severity 40 + hdr['datatype'] = -1 # severity 40 with suppress_warnings(): fhdr, message, raiser = self.log_chk(hdr, 40) assert_equal(message, 'data code -1 not recognized; ' - 'not attempting fix') + 'not attempting fix') assert_raises(*raiser) # datatype not supported - hdr['datatype'] = 255 # severity 40 + hdr['datatype'] = 255 # severity 40 fhdr, message, raiser = self.log_chk(hdr, 40) assert_equal(message, 'data code 255 not supported; ' 'not attempting fix') assert_raises(*raiser) # bitpix hdr = HC() - hdr['datatype'] = 16 # float32 - hdr['bitpix'] = 16 # severity 10 + hdr['datatype'] = 16 # float32 + hdr['bitpix'] = 16 # severity 10 fhdr, message, raiser = self.log_chk(hdr, 10) assert_equal(fhdr['bitpix'], 32) assert_equal(message, 'bitpix does not match datatype; ' - 'setting bitpix to match datatype') + 'setting bitpix to match datatype') assert_raises(*raiser) # pixdim positive hdr = HC() - hdr['pixdim'][1] = -2 # severity 35 + hdr['pixdim'][1] = -2 # severity 35 fhdr, message, raiser = self.log_chk(hdr, 35) assert_equal(fhdr['pixdim'][1], 2) assert_equal(message, 'pixdim[1,2,3] should be positive; ' - 'setting to abs of pixdim values') + 'setting to abs of pixdim values') assert_raises(*raiser) hdr = HC() - hdr['pixdim'][1] = 0 # severity 30 + hdr['pixdim'][1] = 0 # severity 30 fhdr, message, raiser = self.log_chk(hdr, 30) assert_equal(fhdr['pixdim'][1], 1) assert_equal(message, PIXDIM0_MSG) assert_raises(*raiser) # both hdr = HC() - hdr['pixdim'][1] = 0 # severity 30 - hdr['pixdim'][2] = -2 # severity 35 + hdr['pixdim'][1] = 0 # severity 30 + hdr['pixdim'][2] = -2 # severity 35 fhdr, message, raiser = self.log_chk(hdr, 35) assert_equal(fhdr['pixdim'][1], 1) assert_equal(fhdr['pixdim'][2], 2) assert_equal(message, 'pixdim[1,2,3] should be ' - 'non-zero and pixdim[1,2,3] should ' - 'be positive; setting 0 dims to 1 ' - 'and setting to abs of pixdim values') + 'non-zero and pixdim[1,2,3] should ' + 'be positive; setting 0 dims to 1 ' + 'and setting to abs of pixdim values') assert_raises(*raiser) def test_no_scaling_fixes(self): @@ -232,10 +232,10 @@ def test_logger_error(self): # Make a new logger str_io = StringIO() logger = 
logging.getLogger('test.logger') - logger.setLevel(30) # defaultish level + logger.setLevel(30) # defaultish level logger.addHandler(logging.StreamHandler(str_io)) # Prepare an error - hdr['pixdim'][1] = 0 # severity 30 + hdr['pixdim'][1] = 0 # severity 30 log_cache = imageglobals.logger, imageglobals.error_level try: # Check log message appears in new logger @@ -257,11 +257,12 @@ def test_data_dtype(self): (16, np.float32), (32, np.complex64), (64, np.float64), - (128, np.dtype([('R','u1'), + (128, np.dtype([('R', 'u1'), ('G', 'u1'), ('B', 'u1')]))) # and unsupported - here using some labels instead all_unsupported_types = (np.void, 'none', 'all', 0) + def assert_set_dtype(dt_spec, np_dtype): hdr = self.header_class() hdr.set_data_dtype(dt_spec) @@ -288,9 +289,9 @@ def assert_set_dtype(dt_spec, np_dtype): if np.dtype(npt).str[0] in '=|<>': assert_set_dtype(np.dtype(npt).str[1:], npt) # Test aliases to Python types - assert_set_dtype(float, np.float64) # float64 always supported - np_sys_int = np.dtype(int).type # int could be 32 or 64 bit - if np_sys_int in self.supported_np_types: # no int64 for Analyze + assert_set_dtype(float, np.float64) # float64 always supported + np_sys_int = np.dtype(int).type # int could be 32 or 64 bit + if np_sys_int in self.supported_np_types: # no int64 for Analyze assert_set_dtype(int, np_sys_int) hdr = self.header_class() for inp in all_unsupported_types: @@ -309,7 +310,7 @@ def test_shapes(self): shape = (mx,) hdr.set_data_shape(shape) assert_equal(hdr.get_data_shape(), shape) - shape = (mx+1,) + shape = (mx + 1,) assert_raises(HeaderDataError, hdr.set_data_shape, shape) # Lists or tuples or arrays will work for setting shape shape = (2, 3, 4) @@ -333,16 +334,16 @@ def test_read_write_data(self): np.zeros(3), str_io) # Test valid write - hdr.set_data_shape((1,2,3)) + hdr.set_data_shape((1, 2, 3)) hdr.set_data_dtype(np.float32) S = BytesIO() data = np.arange(6, dtype=np.float64) # data have to be the right shape assert_raises(HeaderDataError, hdr.data_to_fileobj, data, S) - data = data.reshape((1,2,3)) + data = data.reshape((1, 2, 3)) # and size - assert_raises(HeaderDataError, hdr.data_to_fileobj, data[:,:,:-1], S) - assert_raises(HeaderDataError, hdr.data_to_fileobj, data[:,:-1,:], S) + assert_raises(HeaderDataError, hdr.data_to_fileobj, data[:, :, :-1], S) + assert_raises(HeaderDataError, hdr.data_to_fileobj, data[:, :-1, :], S) # OK if so hdr.data_to_fileobj(data, S) # Read it back @@ -355,7 +356,7 @@ def test_read_write_data(self): S2 = BytesIO() hdr2 = hdr.as_byteswapped() hdr2.set_data_dtype(np.float32) - hdr2.set_data_shape((1,2,3)) + hdr2.set_data_shape((1, 2, 3)) hdr2.data_to_fileobj(data, S2) data_back2 = hdr2.data_from_fileobj(S2) # Compares the same @@ -379,7 +380,7 @@ def test_read_write_data(self): assert_raises(HeaderTypeError, hdr.data_to_fileobj, data, S3, rescale=True) # If not scaling we lose precision from rounding - data = np.arange(6, dtype=np.float64).reshape((1,2,3)) + 0.5 + data = np.arange(6, dtype=np.float64).reshape((1, 2, 3)) + 0.5 with np.errstate(invalid='ignore'): hdr.data_to_fileobj(data, S3, rescale=False) data_back = hdr.data_from_fileobj(S3) @@ -400,14 +401,14 @@ def test_datatype(self): npt = codes.type[code] if npt is np.void: assert_raises( - HeaderDataError, - ehdr.set_data_dtype, - code) + HeaderDataError, + ehdr.set_data_dtype, + code) continue dt = codes.dtype[code] ehdr.set_data_dtype(npt) assert_true(ehdr['datatype'] == code) - assert_true(ehdr['bitpix'] == dt.itemsize*8) + assert_true(ehdr['bitpix'] == 
dt.itemsize * 8) ehdr.set_data_dtype(code) assert_true(ehdr['datatype'] == code) ehdr.set_data_dtype(dt) @@ -422,7 +423,7 @@ def test_offset(self): def test_data_shape_zooms_affine(self): hdr = self.header_class() - for shape in ((1,2,3),(0,),(1,),(1,2),(1,2,3,4)): + for shape in ((1, 2, 3), (0,), (1,), (1, 2), (1, 2, 3, 4)): L = len(shape) hdr.set_data_shape(shape) if L: @@ -434,47 +435,47 @@ def test_data_shape_zooms_affine(self): # errors if zooms do not match shape if len(shape): assert_raises(HeaderDataError, - hdr.set_zooms, - (1,) * (L-1)) + hdr.set_zooms, + (1,) * (L - 1)) # Errors for negative zooms assert_raises(HeaderDataError, - hdr.set_zooms, - (-1,) + (1,)*(L-1)) + hdr.set_zooms, + (-1,) + (1,) * (L - 1)) assert_raises(HeaderDataError, - hdr.set_zooms, - (1,) * (L+1)) + hdr.set_zooms, + (1,) * (L + 1)) # Errors for negative zooms assert_raises(HeaderDataError, - hdr.set_zooms, - (-1,) * L) + hdr.set_zooms, + (-1,) * L) # reducing the dimensionality of the array and then increasing # it again reverts the previously set zoom values to 1.0 hdr = self.header_class() - hdr.set_data_shape((1,2,3)) - hdr.set_zooms((4,5,6)) - assert_array_equal(hdr.get_zooms(), (4,5,6)) - hdr.set_data_shape((1,2)) - assert_array_equal(hdr.get_zooms(), (4,5)) - hdr.set_data_shape((1,2,3)) - assert_array_equal(hdr.get_zooms(), (4,5,1)) + hdr.set_data_shape((1, 2, 3)) + hdr.set_zooms((4, 5, 6)) + assert_array_equal(hdr.get_zooms(), (4, 5, 6)) + hdr.set_data_shape((1, 2)) + assert_array_equal(hdr.get_zooms(), (4, 5)) + hdr.set_data_shape((1, 2, 3)) + assert_array_equal(hdr.get_zooms(), (4, 5, 1)) # Setting zooms changes affine assert_array_equal(np.diag(hdr.get_base_affine()), - [-4,5,1,1]) - hdr.set_zooms((1,1,1)) + [-4, 5, 1, 1]) + hdr.set_zooms((1, 1, 1)) assert_array_equal(np.diag(hdr.get_base_affine()), - [-1,1,1,1]) + [-1, 1, 1, 1]) def test_default_x_flip(self): hdr = self.header_class() hdr.default_x_flip = True - hdr.set_data_shape((1,2,3)) - hdr.set_zooms((1,1,1)) + hdr.set_data_shape((1, 2, 3)) + hdr.set_zooms((1, 1, 1)) assert_array_equal(np.diag(hdr.get_base_affine()), - [-1,1,1,1]) + [-1, 1, 1, 1]) hdr.default_x_flip = False # Check avoids translations assert_array_equal(np.diag(hdr.get_base_affine()), - [1,1,1,1]) + [1, 1, 1, 1]) def test_from_eg_file(self): fileobj = open(self.example_file, 'rb') @@ -486,14 +487,14 @@ def test_orientation(self): # Test flips hdr = self.header_class() assert_true(hdr.default_x_flip) - hdr.set_data_shape((3,5,7)) - hdr.set_zooms((4,5,6)) - aff = np.diag((-4,5,6,1)) - aff[:3,3] = np.array([1,2,3]) * np.array([-4,5,6]) * -1 + hdr.set_data_shape((3, 5, 7)) + hdr.set_zooms((4, 5, 6)) + aff = np.diag((-4, 5, 6, 1)) + aff[:3, 3] = np.array([1, 2, 3]) * np.array([-4, 5, 6]) * -1 assert_array_equal(hdr.get_base_affine(), aff) hdr.default_x_flip = False assert_false(hdr.default_x_flip) - aff[0]*=-1 + aff[0] *= -1 assert_array_equal(hdr.get_base_affine(), aff) def test_str(self): @@ -513,20 +514,24 @@ def test_from_header(self): assert_equal(klass(), empty) hdr = klass() hdr.set_data_dtype(np.float64) - hdr.set_data_shape((1,2,3)) + hdr.set_data_shape((1, 2, 3)) hdr.set_zooms((3.0, 2.0, 1.0)) copy = klass.from_header(hdr) assert_equal(hdr, copy) assert_false(hdr is copy) + class C(object): + def get_data_dtype(self): return np.dtype('i2') - def get_data_shape(self): return (5,4,3) + + def get_data_shape(self): return (5, 4, 3) + def get_zooms(self): return (10.0, 9.0, 8.0) converted = klass.from_header(C()) assert_true(isinstance(converted, klass)) 
assert_equal(converted.get_data_dtype(), np.dtype('i2')) - assert_equal(converted.get_data_shape(), (5,4,3)) - assert_equal(converted.get_zooms(), (10.0,9.0,8.0)) + assert_equal(converted.get_data_shape(), (5, 4, 3)) + assert_equal(converted.get_zooms(), (10.0, 9.0, 8.0)) def test_base_affine(self): klass = self.header_class @@ -536,31 +541,31 @@ def test_base_affine(self): assert_true(hdr.default_x_flip) assert_array_almost_equal( hdr.get_base_affine(), - [[-3., 0., 0., 3.], - [ 0., 2., 0., -4.], - [ 0., 0., 1., -3.], - [ 0., 0., 0., 1.]]) + [[-3., 0., 0., 3.], + [0., 2., 0., -4.], + [0., 0., 1., -3.], + [0., 0., 0., 1.]]) hdr.set_data_shape((3, 5)) assert_array_almost_equal( hdr.get_base_affine(), - [[-3., 0., 0., 3.], - [ 0., 2., 0., -4.], - [ 0., 0., 1., -0.], - [ 0., 0., 0., 1.]]) + [[-3., 0., 0., 3.], + [0., 2., 0., -4.], + [0., 0., 1., -0.], + [0., 0., 0., 1.]]) hdr.set_data_shape((3, 5, 7)) assert_array_almost_equal( hdr.get_base_affine(), - [[-3., 0., 0., 3.], - [ 0., 2., 0., -4.], - [ 0., 0., 1., -3.], - [ 0., 0., 0., 1.]]) + [[-3., 0., 0., 3.], + [0., 2., 0., -4.], + [0., 0., 1., -3.], + [0., 0., 0., 1.]]) def test_scaling(self): # Test integer scaling from float # Analyze headers cannot do float-integer scaling hdr = self.header_class() assert_true(hdr.default_x_flip) - shape = (1,2,3) + shape = (1, 2, 3) hdr.set_data_shape(shape) hdr.set_data_dtype(np.float32) data = np.ones(shape, dtype=np.float64) @@ -607,17 +612,25 @@ def test_from_analyze_map(self): # Test that any header can pass values from a mapping klass = self.header_class # Header needs to implement data_dtype, data_shape, zooms - class H1(object): pass + + class H1(object): + pass assert_raises(AttributeError, klass.from_header, H1()) + class H2(object): + def get_data_dtype(self): return np.dtype('u1') assert_raises(AttributeError, klass.from_header, H2()) + class H3(H2): + def get_data_shape(self): return (2, 3, 4) assert_raises(AttributeError, klass.from_header, H3()) + class H4(H3): + def get_zooms(self): return 4., 5., 6. 
exp_hdr = klass()
@@ -626,20 +639,26 @@ def get_zooms(self):
     exp_hdr.set_zooms((4, 5, 6))
     assert_equal(klass.from_header(H4()), exp_hdr)
     # cal_max, cal_min get properly set from ``as_analyze_map``
+
     class H5(H4):
+
         def as_analyze_map(self):
             return dict(cal_min=-100, cal_max=100)
     exp_hdr['cal_min'] = -100
     exp_hdr['cal_max'] = 100
     assert_equal(klass.from_header(H5()), exp_hdr)
     # set_* methods override fields from header
+
     class H6(H5):
+
         def as_analyze_map(self):
             return dict(datatype=4, bitpix=32,
                         cal_min=-100, cal_max=100)
     assert_equal(klass.from_header(H6()), exp_hdr)
     # Any mapping will do, including a Nifti header
+
     class H7(H5):
+
         def as_analyze_map(self):
             n_hdr = Nifti1Header()
             n_hdr.set_data_dtype(np.dtype('i2'))
@@ -653,8 +672,8 @@ def as_analyze_map(self):
 
 def test_best_affine():
     hdr = AnalyzeHeader()
-    hdr.set_data_shape((3,5,7))
-    hdr.set_zooms((4,5,6))
+    hdr.set_data_shape((3, 5, 7))
+    hdr.set_zooms((4, 5, 6))
     assert_array_equal(hdr.get_base_affine(), hdr.get_best_affine())
 
 
@@ -697,7 +716,7 @@ def test_data_hdr_cache(self):
         fm = IC.make_file_map()
         for key, value in fm.items():
             fm[key].fileobj = BytesIO()
-        shape = (2,3,4)
+        shape = (2, 3, 4)
         data = np.arange(24, dtype=np.int8).reshape(shape)
         affine = np.eye(4)
         hdr = IC.header_class()
@@ -708,21 +727,21 @@
         assert_equal(img2.shape, shape)
         assert_equal(img2.get_data_dtype().type, np.int16)
         hdr = img2.header
-        hdr.set_data_shape((3,2,2))
-        assert_equal(hdr.get_data_shape(), (3,2,2))
+        hdr.set_data_shape((3, 2, 2))
+        assert_equal(hdr.get_data_shape(), (3, 2, 2))
         hdr.set_data_dtype(np.uint8)
         assert_equal(hdr.get_data_dtype(), np.dtype(np.uint8))
         assert_array_equal(img2.get_data(), data)
         # now check read_img_data function - here we do see the changed
         # header
         sc_data = read_img_data(img2)
-        assert_equal(sc_data.shape, (3,2,2))
+        assert_equal(sc_data.shape, (3, 2, 2))
         us_data = read_img_data(img2, prefer='unscaled')
-        assert_equal(us_data.shape, (3,2,2))
+        assert_equal(us_data.shape, (3, 2, 2))
 
     def test_affine_44(self):
         IC = self.image_class
-        shape = (2,3,4)
+        shape = (2, 3, 4)
         data = np.arange(24, dtype=np.int16).reshape(shape)
         affine = np.diag([2, 3, 4, 1])
         # OK - affine correct shape
@@ -784,21 +803,21 @@ def test_header_updating(self):
         # Only update on changes
         img_klass = self.image_class
         # With a None affine - don't overwrite zooms
-        img = img_klass(np.zeros((2,3,4)), None)
+        img = img_klass(np.zeros((2, 3, 4)), None)
         hdr = img.header
-        hdr.set_zooms((4,5,6))
+        hdr.set_zooms((4, 5, 6))
         # Save / reload using bytes IO objects
        for key, value in img.file_map.items():
             value.fileobj = BytesIO()
         img.to_file_map()
         hdr_back = img.from_file_map(img.file_map).header
-        assert_array_equal(hdr_back.get_zooms(), (4,5,6))
+        assert_array_equal(hdr_back.get_zooms(), (4, 5, 6))
         # With a real affine, update zooms
-        img = img_klass(np.zeros((2,3,4)), np.diag([2,3,4,1]), hdr)
+        img = img_klass(np.zeros((2, 3, 4)), np.diag([2, 3, 4, 1]), hdr)
         hdr = img.header
         assert_array_equal(hdr.get_zooms(), (2, 3, 4))
         # Modify affine in-place? Update on save.
- img.affine[0,0] = 9 + img.affine[0, 0] = 9 for key, value in img.file_map.items(): value.fileobj = BytesIO() img.to_file_map() @@ -815,7 +834,7 @@ def test_pickle(self): # Test that images pickle # Image that is not proxied can pickle img_klass = self.image_class - img = img_klass(np.zeros((2,3,4)), None) + img = img_klass(np.zeros((2, 3, 4)), None) img_str = pickle.dumps(img) img2 = pickle.loads(img_str) assert_array_equal(img.get_data(), img2.get_data()) @@ -850,7 +869,7 @@ def test_no_finite_values(self): def test_unsupported(): # analyze does not support uint32 - data = np.arange(24, dtype=np.int32).reshape((2,3,4)) + data = np.arange(24, dtype=np.int32).reshape((2, 3, 4)) affine = np.eye(4) - data = np.arange(24, dtype=np.uint32).reshape((2,3,4)) + data = np.arange(24, dtype=np.uint32).reshape((2, 3, 4)) assert_raises(HeaderDataError, AnalyzeImage, data, affine) diff --git a/nibabel/tests/test_api_validators.py b/nibabel/tests/test_api_validators.py index b54f30ea06..642498522e 100644 --- a/nibabel/tests/test_api_validators.py +++ b/nibabel/tests/test_api_validators.py @@ -16,6 +16,7 @@ class validator2test(type): """ def __new__(mcs, name, bases, dict): klass = type.__new__(mcs, name, bases, dict) + def make_test(name, validator): def meth(self): for imaker, params in self.obj_params(): @@ -32,7 +33,6 @@ def meth(self): return klass - class ValidateAPI(with_metaclass(validator2test)): """ A class to validate APIs @@ -67,6 +67,7 @@ def obj_params(self): example. """ class C(object): + def __init__(self, var): self.var = var @@ -76,7 +77,6 @@ def get_var(self): yield C(5), {'var': 5} yield C('easypeasy'), {'var': 'easypeasy'} - def validate_something(self, obj, params): """ Do some checks of the `obj` API against `params` diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index f5ae855eae..3df3a8b4d9 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -28,6 +28,7 @@ class FunkyHeader(object): + def __init__(self, shape): self.shape = shape @@ -55,7 +56,7 @@ class CArrayProxy(ArrayProxy): def test_init(): bio = BytesIO() - shape = [2,3,4] + shape = [2, 3, 4] dtype = np.int32 arr = np.arange(24, dtype=dtype).reshape(shape) bio.seek(16) @@ -90,7 +91,7 @@ def write_raw_data(arr, hdr, fileobj): def test_nifti1_init(): bio = BytesIO() - shape = (2,3,4) + shape = (2, 3, 4) hdr = Nifti1Header() arr = np.arange(24, dtype=np.int16).reshape(shape) write_raw_data(arr, hdr, bio) @@ -150,6 +151,7 @@ def test_is_proxy(): assert_false(is_proxy(bio)) assert_false(is_proxy(hdr)) assert_false(is_proxy(np.zeros((2, 3, 4)))) + class NP(object): is_proxy = False assert_false(is_proxy(NP())) @@ -158,6 +160,7 @@ class NP(object): def test_get_unscaled(): # Test fetch of raw array class FunkyHeader2(FunkyHeader): + def get_slope_inter(self): return 2.1, 3.14 shape = (2, 3, 4) @@ -188,14 +191,14 @@ def check_mmap(hdr, offset, proxy_class, check_mode=True): fobj.write(b' ' * offset) fobj.write(arr.tostring(order='F')) for mmap, expected_mode in ( - # mmap value, expected memmap mode - # mmap=None -> no mmap value - # expected mode=None -> no memmap returned - (None, 'c'), - (True, 'c'), - ('c', 'c'), - ('r', 'r'), - (False, None)): + # mmap value, expected memmap mode + # mmap=None -> no mmap value + # expected mode=None -> no memmap returned + (None, 'c'), + (True, 'c'), + ('c', 'c'), + ('r', 'r'), + (False, None)): kwargs = {} if mmap is not None: kwargs['mmap'] = mmap diff --git a/nibabel/tests/test_arraywriters.py 
b/nibabel/tests/test_arraywriters.py index fc86dd2809..7ab36268b5 100644 --- a/nibabel/tests/test_arraywriters.py +++ b/nibabel/tests/test_arraywriters.py @@ -80,7 +80,7 @@ def test_arraywriters(): # reason -- not our fault, and to test correct operation we # will just compare element by element if NP_VERSION == '1.7.1' and sys.version_info[:2] == (3, 3): - assert_array_equal_ = lambda x, y: np.all([x_==y_ for x_,y_ in zip(x,y)]) + assert_array_equal_ = lambda x, y: np.all([x_ == y_ for x_, y_ in zip(x, y)]) else: assert_array_equal_ = assert_array_equal # assert against original array because POWER7 was running into @@ -122,9 +122,9 @@ def test_arraywriter_check_scaling(): def test_no_scaling(): # Test arraywriter when writing different types without scaling for in_dtype, out_dtype, awt in itertools.product( - NUMERIC_TYPES, - NUMERIC_TYPES, - (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter)): + NUMERIC_TYPES, + NUMERIC_TYPES, + (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter)): mn_in, mx_in = _dt_min_max(in_dtype) arr = np.array([mn_in, 0, 1, mx_in], dtype=in_dtype) kwargs = (dict(check_scaling=False) if awt == ArrayWriter @@ -242,7 +242,7 @@ def test_scaling_needed(): assert_false(ArrayWriter(arr, out_t).scaling_needed()) continue # The output data type does not include the input data range - max_min = max(in_min, out_min) # 0 for input or output uint + max_min = max(in_min, out_min) # 0 for input or output uint min_max = min(in_max, out_max) arr = np.array([max_min, min_max], in_t) assert_false(ArrayWriter(arr, out_t).scaling_needed()) @@ -269,9 +269,9 @@ def test_special_rt(): assert_equal(get_slope_inter(aw), (1, 0)) assert_array_equal(round_trip(aw), 0) for in_dtt, out_dtt, awt in itertools.product( - FLOAT_TYPES, - IUINT_TYPES, - (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter)): + FLOAT_TYPES, + IUINT_TYPES, + (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter)): arr = np.zeros((3,), dtype=in_dtt) aw = awt(arr, out_dtt) assert_equal(get_slope_inter(aw), (1, 0)) @@ -295,7 +295,7 @@ def test_slope_inter_castable(): for out_dtt in NUMERIC_TYPES: for klass in (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter): arr = np.zeros((5,), dtype=in_dtt) - aw = klass(arr, out_dtt) # no error + aw = klass(arr, out_dtt) # no error # Test special case of none finite # This raises error for ArrayWriter, but not for the others arr = np.array([np.inf, np.nan, -np.inf]) @@ -303,8 +303,8 @@ def test_slope_inter_castable(): for out_dtt in IUINT_TYPES: in_arr = arr.astype(in_dtt) assert_raises(WriterError, ArrayWriter, in_arr, out_dtt) - aw = SlopeArrayWriter(arr.astype(in_dtt), out_dtt) # no error - aw = SlopeInterArrayWriter(arr.astype(in_dtt), out_dtt) # no error + aw = SlopeArrayWriter(arr.astype(in_dtt), out_dtt) # no error + aw = SlopeInterArrayWriter(arr.astype(in_dtt), out_dtt) # no error for in_dtt, out_dtt, arr, slope_only, slope_inter, neither in ( (np.float32, np.float32, 1, True, True, True), (np.float64, np.float32, 1, True, True, True), @@ -316,16 +316,16 @@ def test_slope_inter_castable(): (np.complex128, np.int16, 1, False, False, False), (np.uint8, np.int16, 1, True, True, True), # The following tests depend on the input data - (np.uint16, np.int16, 1, True, True, True), # 1 is in range - (np.uint16, np.int16, 2**16-1, True, True, False), # This not in range - (np.uint16, np.int16, (0, 2**16-1), True, True, False), + (np.uint16, np.int16, 1, True, True, True), # 1 is in range + (np.uint16, np.int16, 2**16 - 1, True, True, False), # This not in range + 
(np.uint16, np.int16, (0, 2**16 - 1), True, True, False), (np.uint16, np.uint8, 1, True, True, True), - (np.int16, np.uint16, 1, True, True, True), # in range - (np.int16, np.uint16, -1, True, True, False), # flip works for scaling - (np.int16, np.uint16, (-1, 1), False, True, False), # not with +- - (np.int8, np.uint16, 1, True, True, True), # in range - (np.int8, np.uint16, -1, True, True, False), # flip works for scaling - (np.int8, np.uint16, (-1, 1), False, True, False), # not with +- + (np.int16, np.uint16, 1, True, True, True), # in range + (np.int16, np.uint16, -1, True, True, False), # flip works for scaling + (np.int16, np.uint16, (-1, 1), False, True, False), # not with +- + (np.int8, np.uint16, 1, True, True, True), # in range + (np.int8, np.uint16, -1, True, True, False), # flip works for scaling + (np.int8, np.uint16, (-1, 1), False, True, False), # not with +- ): # data for casting data = np.array(arr, dtype=in_dtt) @@ -384,15 +384,15 @@ def test_resets(): outp = np.array(outp) aw = klass(arr, np.uint8) assert_array_equal(get_slope_inter(aw), outp) - aw.calc_scale() # cached no change + aw.calc_scale() # cached no change assert_array_equal(get_slope_inter(aw), outp) - aw.calc_scale(force=True) # same data, no change + aw.calc_scale(force=True) # same data, no change assert_array_equal(get_slope_inter(aw), outp) # Change underlying array aw.array[:] = aw.array * 2 - aw.calc_scale() # cached still + aw.calc_scale() # cached still assert_array_equal(get_slope_inter(aw), outp) - aw.calc_scale(force=True) # new data, change + aw.calc_scale(force=True) # new data, change assert_array_equal(get_slope_inter(aw), outp * 2) # Test reset aw.reset() @@ -404,12 +404,12 @@ def test_no_offset_scale(): SAW = SlopeArrayWriter # Floating point for data in ((-128, 127), - (-128, 126), - (-128, -127), - (-128, 0), - (-128, -1), - (126, 127), - (-127, 127)): + (-128, 126), + (-128, -127), + (-128, 0), + (-128, -1), + (126, 127), + (-127, 127)): aw = SAW(np.array(data, dtype=np.float32), np.int8) assert_equal(aw.slope, 1.0) aw = SAW(np.array([-126, 127 * 2.0], dtype=np.float32), np.int8) @@ -426,17 +426,17 @@ def test_with_offset_scale(): # Tests of specific cases in slope, inter SIAW = SlopeInterArrayWriter aw = SIAW(np.array([0, 127], dtype=np.int8), np.uint8) - assert_equal((aw.slope, aw.inter), (1, 0)) # in range + assert_equal((aw.slope, aw.inter), (1, 0)) # in range aw = SIAW(np.array([-1, 126], dtype=np.int8), np.uint8) - assert_equal((aw.slope, aw.inter), (1, -1)) # offset only + assert_equal((aw.slope, aw.inter), (1, -1)) # offset only aw = SIAW(np.array([-1, 254], dtype=np.int16), np.uint8) - assert_equal((aw.slope, aw.inter), (1, -1)) # offset only + assert_equal((aw.slope, aw.inter), (1, -1)) # offset only aw = SIAW(np.array([-1, 255], dtype=np.int16), np.uint8) - assert_not_equal((aw.slope, aw.inter), (1, -1)) # Too big for offset only + assert_not_equal((aw.slope, aw.inter), (1, -1)) # Too big for offset only aw = SIAW(np.array([-256, -2], dtype=np.int16), np.uint8) - assert_equal((aw.slope, aw.inter), (1, -256)) # offset only + assert_equal((aw.slope, aw.inter), (1, -256)) # offset only aw = SIAW(np.array([-256, -2], dtype=np.int16), np.int8) - assert_equal((aw.slope, aw.inter), (1, -129)) # offset only + assert_equal((aw.slope, aw.inter), (1, -129)) # offset only def test_io_scaling(): @@ -444,12 +444,12 @@ def test_io_scaling(): # and from float to integer. 
bio = BytesIO() for in_type, out_type in itertools.product( - (np.int16, np.uint16, np.float32), - (np.int8, np.uint8, np.int16, np.uint16)): + (np.int16, np.uint16, np.float32), + (np.int8, np.uint8, np.int16, np.uint16)): out_dtype = np.dtype(out_type) info = type_info(in_type) imin, imax = info['min'], info['max'] - if imin == 0: # unsigned int + if imin == 0: # unsigned int val_tuples = ((0, imax), (100, imax)) else: @@ -485,8 +485,8 @@ def test_input_ranges(): working_type = np.float32 work_eps = np.finfo(working_type).eps for out_type, offset in itertools.product( - IUINT_TYPES, - range(-1000, 1000, 100)): + IUINT_TYPES, + range(-1000, 1000, 100)): aw = SlopeInterArrayWriter(arr, out_type) aw.to_fileobj(bio) arr2 = array_from_file(arr.shape, out_type, bio) @@ -789,7 +789,7 @@ def test_nan2zero_scaling(): FLOAT_TYPES, IUINT_TYPES, (-1, 1), - ): + ): # Use fixed-up type information to avoid bugs, especially on PPC in_info = type_info(in_dt) out_info = type_info(out_dt) @@ -818,28 +818,28 @@ def test_nan2zero_scaling(): def test_finite_range_nan(): # Test finite range method and has_nan property for in_arr, res in ( - ([[-1, 0, 1],[np.inf, np.nan, -np.inf]], (-1, 1)), - (np.array([[-1, 0, 1],[np.inf, np.nan, -np.inf]]), (-1, 1)), - ([[np.nan],[np.nan]], (np.inf, -np.inf)), # all nans slices + ([[-1, 0, 1], [np.inf, np.nan, -np.inf]], (-1, 1)), + (np.array([[-1, 0, 1], [np.inf, np.nan, -np.inf]]), (-1, 1)), + ([[np.nan], [np.nan]], (np.inf, -np.inf)), # all nans slices (np.zeros((3, 4, 5)) + np.nan, (np.inf, -np.inf)), - ([[-np.inf],[np.inf]], (np.inf, -np.inf)), # all infs slices + ([[-np.inf], [np.inf]], (np.inf, -np.inf)), # all infs slices (np.zeros((3, 4, 5)) + np.inf, (np.inf, -np.inf)), ([[np.nan, -1, 2], [-2, np.nan, 1]], (-2, 2)), ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), - ([[-np.inf, 2], [np.nan, 1]], (1, 2)), # good max case + ([[-np.inf, 2], [np.nan, 1]], (1, 2)), # good max case ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), ([np.nan], (np.inf, -np.inf)), ([np.inf], (np.inf, -np.inf)), ([-np.inf], (np.inf, -np.inf)), - ([np.inf, 1], (1, 1)), # only look at finite values + ([np.inf, 1], (1, 1)), # only look at finite values ([-np.inf, 1], (1, 1)), - ([[],[]], (np.inf, -np.inf)), # empty array + ([[], []], (np.inf, -np.inf)), # empty array (np.array([[-3, 0, 1], [2, -1, 4]], dtype=np.int), (-3, 4)), (np.array([[1, 0, 1], [2, 3, 4]], dtype=np.uint), (0, 4)), - ([0., 1, 2, 3], (0,3)), + ([0., 1, 2, 3], (0, 3)), # Complex comparison works as if they are floats - ([[np.nan, -1-100j, 2], [-2, np.nan, 1+100j]], (-2, 2)), - ([[np.nan, -1, 2-100j], [-2+100j, np.nan, 1]], (-2+100j, 2-100j)), + ([[np.nan, -1 - 100j, 2], [-2, np.nan, 1 + 100j]], (-2, 2)), + ([[np.nan, -1, 2 - 100j], [-2 + 100j, np.nan, 1]], (-2 + 100j, 2 - 100j)), ): for awt, kwargs in ((ArrayWriter, dict(check_scaling=False)), (SlopeArrayWriter, {}), diff --git a/nibabel/tests/test_batteryrunners.py b/nibabel/tests/test_batteryrunners.py index 3d6767d6c8..8f1710f779 100644 --- a/nibabel/tests/test_batteryrunners.py +++ b/nibabel/tests/test_batteryrunners.py @@ -84,7 +84,7 @@ def test_init_basic(): # Len returns number of checks battrun = BatteryRunner((chk1,)) assert_equal(len(battrun), 1) - battrun = BatteryRunner((chk1,chk2)) + battrun = BatteryRunner((chk1, chk2)) assert_equal(len(battrun), 2) @@ -106,13 +106,15 @@ def test_report_strings(): rep.problem_level = 30 rep.write_raise(str_io) assert_equal(str_io.getvalue(), 'Level 30: msg; fix\n') - str_io.truncate(0); str_io.seek(0) + 
str_io.truncate(0) + str_io.seek(0) # No fix string, no fix message rep.fix_msg = '' rep.write_raise(str_io) assert_equal(str_io.getvalue(), 'Level 30: msg\n') rep.fix_msg = 'fix' - str_io.truncate(0); str_io.seek(0) + str_io.truncate(0) + str_io.seek(0) # If we drop the level, nothing goes to the log rep.problem_level = 20 rep.write_raise(str_io) @@ -120,7 +122,8 @@ def test_report_strings(): # Unless we set the default log level in the call rep.write_raise(str_io, log_level=20) assert_equal(str_io.getvalue(), 'Level 20: msg; fix\n') - str_io.truncate(0); str_io.seek(0) + str_io.truncate(0) + str_io.seek(0) # If we set the error level down this low, we raise an error assert_raises(ValueError, rep.write_raise, str_io, 20) # But the log level wasn't low enough to do a log entry @@ -130,7 +133,8 @@ def test_report_strings(): assert_raises(ValueError, rep.write_raise, str_io, 20, 20) assert_equal(str_io.getvalue(), 'Level 20: msg; fix\n') # If there's no error, we can't raise - str_io.truncate(0); str_io.seek(0) + str_io.truncate(0) + str_io.seek(0) rep.error = None rep.write_raise(str_io, 20) assert_equal(str_io.getvalue(), '') @@ -140,14 +144,15 @@ def test_logging(): rep = Report(ValueError, 20, 'msg', 'fix') str_io = StringIO() logger = logging.getLogger('test.logger') - logger.setLevel(30) # defaultish level + logger.setLevel(30) # defaultish level logger.addHandler(logging.StreamHandler(str_io)) rep.log_raise(logger) assert_equal(str_io.getvalue(), '') rep.problem_level = 30 rep.log_raise(logger) assert_equal(str_io.getvalue(), 'msg; fix\n') - str_io.truncate(0); str_io.seek(0) + str_io.truncate(0) + str_io.seek(0) def test_checks(): @@ -164,8 +169,8 @@ def test_checks(): 20, 'no "testkey"', 'added "testkey"')) - assert_equal(obj, {'testkey':1}) - battrun = BatteryRunner((chk1,chk2)) + assert_equal(obj, {'testkey': 1}) + battrun = BatteryRunner((chk1, chk2)) reports = battrun.check_only({}) assert_equal(reports[0], Report(KeyError, @@ -181,7 +186,7 @@ def test_checks(): # In the case of fix, the previous fix exposes a different error # Note, because obj is mutable, first and second point to modified # (and final) dictionary - output_obj = {'testkey':0} + output_obj = {'testkey': 0} assert_equal(reports[0], Report(KeyError, 20, diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py index 15bd279a1d..c9d3645ad1 100644 --- a/nibabel/tests/test_casting.py +++ b/nibabel/tests/test_casting.py @@ -89,7 +89,7 @@ def test_casting(): for ft in np.sctypes['float']: for it in np.sctypes['int'] + np.sctypes['uint']: ii = np.iinfo(it) - arr = [ii.min-1, ii.max+1, -np.inf, np.inf, np.nan, 0.2, 10.6] + arr = [ii.min - 1, ii.max + 1, -np.inf, np.inf, np.nan, 0.2, 10.6] farr_orig = np.array(arr, dtype=ft) # We're later going to test if we modify this array farr = farr_orig.copy() @@ -123,7 +123,7 @@ def test_casting(): # Confirm input array is not modified nans = np.isnan(farr) assert_array_equal(nans, np.isnan(farr_orig)) - assert_array_equal(farr[nans==False], farr_orig[nans==False]) + assert_array_equal(farr[nans == False], farr_orig[nans == False]) # Test scalars work and return scalars assert_array_equal(float_to_int(np.float32(0), np.int16), [0]) # Test scalar nan OK @@ -142,15 +142,15 @@ def test_int_abs(): assert_equal(udtype.kind, 'u') assert_equal(idtype.itemsize, udtype.itemsize) mn, mx = in_arr - e_mn = as_int(mx) + 1 # as_int needed for numpy 1.4.1 casting + e_mn = as_int(mx) + 1 # as_int needed for numpy 1.4.1 casting assert_equal(int_abs(mx), mx) 
assert_equal(int_abs(mn), e_mn)
         assert_array_equal(int_abs(in_arr), [e_mn, mx])
 
 
 def test_floor_log2():
-    assert_equal(floor_log2(2**9+1), 9)
-    assert_equal(floor_log2(-2**9+1), 8)
+    assert_equal(floor_log2(2**9 + 1), 9)
+    assert_equal(floor_log2(-2**9 + 1), 8)
     assert_equal(floor_log2(2), 1)
     assert_equal(floor_log2(1), 0)
     assert_equal(floor_log2(0.5), -1)
@@ -163,19 +163,19 @@ def test_floor_log2():
 
 def test_able_int_type():
     # The integer type capable of containing values
     for vals, exp_out in (
-        ([0, 1], np.uint8),
-        ([0, 255], np.uint8),
-        ([-1, 1], np.int8),
-        ([0, 256], np.uint16),
-        ([-1, 128], np.int16),
-        ([0.1, 1], None),
-        ([0, 2**16], np.uint32),
-        ([-1, 2**15], np.int32),
-        ([0, 2**32], np.uint64),
-        ([-1, 2**31], np.int64),
-        ([-1, 2**64-1], None),
-        ([0, 2**64-1], np.uint64),
-        ([0, 2**64], None)):
+            ([0, 1], np.uint8),
+            ([0, 255], np.uint8),
+            ([-1, 1], np.int8),
+            ([0, 256], np.uint16),
+            ([-1, 128], np.int16),
+            ([0.1, 1], None),
+            ([0, 2**16], np.uint32),
+            ([-1, 2**15], np.int32),
+            ([0, 2**32], np.uint64),
+            ([-1, 2**31], np.int64),
+            ([-1, 2**64 - 1], None),
+            ([0, 2**64 - 1], np.uint64),
+            ([0, 2**64], None)):
         assert_equal(able_int_type(vals), exp_out)
 
@@ -215,9 +215,9 @@ def test_best_float():
     assert_equal(end_of_ints, end_of_ints + 1)
     # longdouble may have more, but not on 32 bit windows, at least
     end_of_ints = np.longdouble(2**53)
-    if (end_of_ints == (end_of_ints + 1) or # off continuous integers
-        machine() == 'sparc64' or # crippling slow longdouble on sparc
-        longdouble_precision_improved()): # Windows precisions can change
+    if (end_of_ints == (end_of_ints + 1) or  # off continuous integers
+            machine() == 'sparc64' or  # crippling slow longdouble on sparc
+            longdouble_precision_improved()):  # Windows precisions can change
         assert_equal(best, np.float64)
     else:
         assert_equal(best, np.longdouble)
@@ -237,12 +237,12 @@ def test_ulp():
     assert_equal(ulp(np.float32(1.999)), np.finfo(np.float32).eps)
     # Integers always return 1
     assert_equal(ulp(1), 1)
-    assert_equal(ulp(2**63-1), 1)
+    assert_equal(ulp(2**63 - 1), 1)
     # negative / positive same
     assert_equal(ulp(-1), 1)
     assert_equal(ulp(7.999), ulp(4.0))
     assert_equal(ulp(-7.999), ulp(4.0))
-    assert_equal(ulp(np.float64(2**54-2)), 2)
+    assert_equal(ulp(np.float64(2**54 - 2)), 2)
     assert_equal(ulp(np.float64(2**54)), 4)
     assert_equal(ulp(np.float64(2**54)), 4)
     # Infs, NaNs return nan
@@ -250,13 +250,13 @@
     assert_true(np.isnan(ulp(-np.inf)))
     assert_true(np.isnan(ulp(np.nan)))
     # 0 gives subnormal smallest
-    subn64 = np.float64(2**(-1022-52))
-    subn32 = np.float32(2**(-126-23))
+    subn64 = np.float64(2**(-1022 - 52))
+    subn32 = np.float32(2**(-126 - 23))
     assert_equal(ulp(0.0), subn64)
     assert_equal(ulp(np.float64(0)), subn64)
     assert_equal(ulp(np.float32(0)), subn32)
     # as do multiples of subnormal smallest
     assert_equal(ulp(subn64 * np.float64(2**52)), subn64)
-    assert_equal(ulp(subn64 * np.float64(2**53)), subn64*2)
+    assert_equal(ulp(subn64 * np.float64(2**53)), subn64 * 2)
     assert_equal(ulp(subn32 * np.float32(2**23)), subn32)
-    assert_equal(ulp(subn32 * np.float32(2**24)), subn32*2)
+    assert_equal(ulp(subn32 * np.float32(2**24)), subn32 * 2)
diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py
index d4d54d368d..365351f4ed 100644
--- a/nibabel/tests/test_data.py
+++ b/nibabel/tests/test_data.py
@@ -9,9 +9,9 @@
 import tempfile
 
 from ..data import (get_data_path, find_data_dir,
-        DataError,
_cfg_value, make_datasource, + Datasource, VersionedDatasource, Bomber, + datasource_or_bomber) from ..tmpdirs import TemporaryDirectory @@ -28,6 +28,7 @@ DATA_FUNCS = {} + def setup_data_env(): setup_environment() global DATA_FUNCS @@ -52,8 +53,8 @@ def test_datasource(): pth = pjoin('some', 'path') ds = Datasource(pth) yield assert_equal, ds.get_filename('unlikeley'), pjoin(pth, 'unlikeley') - yield (assert_equal, ds.get_filename('un','like','ley'), - pjoin(pth, 'un','like','ley')) + yield (assert_equal, ds.get_filename('un', 'like', 'ley'), + pjoin(pth, 'un', 'like', 'ley')) def test_versioned(): @@ -137,8 +138,8 @@ def test_data_path(): if USER_KEY in env: del os.environ[USER_KEY] fake_user_dir = '/user/path' - nibd.get_nipy_system_dir = lambda : '/unlikely/path' - nibd.get_nipy_user_dir = lambda : fake_user_dir + nibd.get_nipy_system_dir = lambda: '/unlikely/path' + nibd.get_nipy_user_dir = lambda: fake_user_dir # now we should only have anything pointed to in the user's dir old_pth = get_data_path() # We should have only sys.prefix and, iff sys.prefix == /usr, @@ -163,13 +164,13 @@ def test_data_path(): with open(tmpfile, 'wt') as fobj: fobj.write('[DATA]\n') fobj.write('path = %s' % tst_pth) - nibd.get_nipy_user_dir = lambda : tmpdir + nibd.get_nipy_user_dir = lambda: tmpdir assert_equal(get_data_path(), tst_list + def_dirs + [tmpdir]) - nibd.get_nipy_user_dir = lambda : fake_user_dir + nibd.get_nipy_user_dir = lambda: fake_user_dir assert_equal(get_data_path(), old_pth) # with some trepidation, the system config files with TemporaryDirectory() as tmpdir: - nibd.get_nipy_system_dir = lambda : tmpdir + nibd.get_nipy_system_dir = lambda: tmpdir tmpfile = pjoin(tmpdir, 'an_example.ini') with open(tmpfile, 'wt') as fobj: fobj.write('[DATA]\n') @@ -213,19 +214,19 @@ def test_find_data_dir(): @with_environment def test_make_datasource(): pkg_def = dict( - relpath = 'pkg') + relpath='pkg') with TemporaryDirectory() as tmpdir: - nibd.get_data_path = lambda : [tmpdir] + nibd.get_data_path = lambda: [tmpdir] yield (assert_raises, - DataError, - make_datasource, - pkg_def) + DataError, + make_datasource, + pkg_def) pkg_dir = pjoin(tmpdir, 'pkg') os.mkdir(pkg_dir) yield (assert_raises, - DataError, - make_datasource, - pkg_def) + DataError, + make_datasource, + pkg_def) tmpfile = pjoin(pkg_dir, 'config.ini') with open(tmpfile, 'wt') as fobj: fobj.write('[DEFAULT]\n') @@ -248,9 +249,9 @@ def test_bomber_inspect(): @with_environment def test_datasource_or_bomber(): pkg_def = dict( - relpath = 'pkg') + relpath='pkg') with TemporaryDirectory() as tmpdir: - nibd.get_data_path = lambda : [tmpdir] + nibd.get_data_path = lambda: [tmpdir] ds = datasource_or_bomber(pkg_def) yield (assert_raises, DataError, @@ -267,13 +268,12 @@ def test_datasource_or_bomber(): fn = ds.get_filename('some_file.txt') # check that versioning works pkg_def['min version'] = '0.2' - ds = datasource_or_bomber(pkg_def) # OK + ds = datasource_or_bomber(pkg_def) # OK fn = ds.get_filename('some_file.txt') pkg_def['min version'] = '0.3' - ds = datasource_or_bomber(pkg_def) # not OK + ds = datasource_or_bomber(pkg_def) # not OK yield (assert_raises, DataError, getattr, ds, 'get_filename') - diff --git a/nibabel/tests/test_deprecated.py b/nibabel/tests/test_deprecated.py index 0145266a40..47fdfc2b17 100644 --- a/nibabel/tests/test_deprecated.py +++ b/nibabel/tests/test_deprecated.py @@ -20,12 +20,16 @@ def test_module_proxy(): def test_futurewarning_mixin(): # Test mixin for FutureWarning class C(object): + def __init__(self, val): 
self.val = val + def meth(self): return self.val + class D(FutureWarningMixin, C): pass + class E(FutureWarningMixin, C): warn_message = "Oh no, not this one" with warnings.catch_warnings(record=True) as warns: diff --git a/nibabel/tests/test_dft.py b/nibabel/tests/test_dft.py index 40c093a9f0..af0029443f 100644 --- a/nibabel/tests/test_dft.py +++ b/nibabel/tests/test_dft.py @@ -24,6 +24,7 @@ data_dir = pjoin(dirname(__file__), 'data') + def setup_module(): if os.name == 'nt': raise SkipTest('FUSE not available for windows, skipping dft tests') @@ -91,7 +92,6 @@ def test_png(): def test_nifti(): studies = dft.get_studies(data_dir) data = studies[0].series[0].as_nifti() - assert_equal(len(data), 352 + 2*256*256*2) + assert_equal(len(data), 352 + 2 * 256 * 256 * 2) h = nifti1.Nifti1Header(data[:348]) assert_equal(h.get_data_shape(), (256, 256, 2)) - diff --git a/nibabel/tests/test_ecat.py b/nibabel/tests/test_ecat.py index 021d92df27..27d81ce77d 100644 --- a/nibabel/tests/test_ecat.py +++ b/nibabel/tests/test_ecat.py @@ -51,7 +51,7 @@ def _set_something_into_hdr(self, hdr): hdr['scan_start_time'] = 42 def test_dtype(self): - #dtype not specified in header, only in subheaders + # dtype not specified in header, only in subheaders hdr = self.header_class() assert_raises(NotImplementedError, hdr.get_data_dtype) @@ -87,28 +87,28 @@ def test_mlist(self): mlist = read_mlist(fid, hdr.endianness) fid.seek(0) fid.seek(512) - dat=fid.read(128*32) - dt = np.dtype([('matlist',np.int32)]) + dat = fid.read(128 * 32) + dt = np.dtype([('matlist', np.int32)]) dt = dt.newbyteorder('>') - mats = np.recarray(shape=(32,4), dtype=dt, buf=dat) + mats = np.recarray(shape=(32, 4), dtype=dt, buf=dat) fid.close() - #tests - assert_true(mats['matlist'][0,0] + mats['matlist'][0,3] == 31) + # tests + assert_true(mats['matlist'][0, 0] + mats['matlist'][0, 3] == 31) assert_true(get_frame_order(mlist)[0][0] == 0) assert_true(get_frame_order(mlist)[0][1] == 16842758.0) # test badly ordered mlist - badordermlist = np.array([[1.68427540e+07, 3.00000000e+00, - 1.20350000e+04, 1.00000000e+00], - [1.68427530e+07, 1.20360000e+04, - 2.40680000e+04, 1.00000000e+00], - [1.68427550e+07, 2.40690000e+04, - 3.61010000e+04, 1.00000000e+00], - [1.68427560e+07, 3.61020000e+04, - 4.81340000e+04, 1.00000000e+00], - [1.68427570e+07, 4.81350000e+04, - 6.01670000e+04, 1.00000000e+00], - [1.68427580e+07, 6.01680000e+04, - 7.22000000e+04, 1.00000000e+00]]) + badordermlist = np.array([[1.68427540e+07, 3.00000000e+00, + 1.20350000e+04, 1.00000000e+00], + [1.68427530e+07, 1.20360000e+04, + 2.40680000e+04, 1.00000000e+00], + [1.68427550e+07, 2.40690000e+04, + 3.61010000e+04, 1.00000000e+00], + [1.68427560e+07, 3.61020000e+04, + 4.81340000e+04, 1.00000000e+00], + [1.68427570e+07, 4.81350000e+04, + 6.01670000e+04, 1.00000000e+00], + [1.68427580e+07, 6.01680000e+04, + 7.22000000e+04, 1.00000000e+00]]) with suppress_warnings(): # STORED order assert_true(get_frame_order(badordermlist)[0][0] == 1) @@ -117,18 +117,18 @@ def test_mlist_errors(self): hdr = self.header_class.from_fileobj(fid) hdr['num_frames'] = 6 mlist = read_mlist(fid, hdr.endianness) - mlist = np.array([[1.68427540e+07, 3.00000000e+00, - 1.20350000e+04, 1.00000000e+00], - [1.68427530e+07, 1.20360000e+04, - 2.40680000e+04, 1.00000000e+00], - [1.68427550e+07, 2.40690000e+04, - 3.61010000e+04, 1.00000000e+00], - [1.68427560e+07, 3.61020000e+04, - 4.81340000e+04, 1.00000000e+00], - [1.68427570e+07, 4.81350000e+04, - 6.01670000e+04, 1.00000000e+00], - [1.68427580e+07, 6.01680000e+04, - 
7.22000000e+04, 1.00000000e+00]]) + mlist = np.array([[1.68427540e+07, 3.00000000e+00, + 1.20350000e+04, 1.00000000e+00], + [1.68427530e+07, 1.20360000e+04, + 2.40680000e+04, 1.00000000e+00], + [1.68427550e+07, 2.40690000e+04, + 3.61010000e+04, 1.00000000e+00], + [1.68427560e+07, 3.61020000e+04, + 4.81340000e+04, 1.00000000e+00], + [1.68427570e+07, 4.81350000e+04, + 6.01670000e+04, 1.00000000e+00], + [1.68427580e+07, 6.01680000e+04, + 7.22000000e+04, 1.00000000e+00]]) with suppress_warnings(): # STORED order series_framenumbers = get_series_framenumbers(mlist) # first frame stored was actually 2nd frame acquired @@ -136,10 +136,10 @@ def test_mlist_errors(self): order = [series_framenumbers[x] for x in sorted(series_framenumbers)] # true series order is [2,1,3,4,5,6], note counting starts at 1 assert_true(order == [2, 1, 3, 4, 5, 6]) - mlist[0,0] = 0 + mlist[0, 0] = 0 with suppress_warnings(): frames_order = get_frame_order(mlist) - neworder =[frames_order[x][0] for x in sorted(frames_order)] + neworder = [frames_order[x][0] for x in sorted(frames_order)] assert_true(neworder == [1, 2, 3, 4, 5]) with suppress_warnings(): assert_raises(IOError, get_series_framenumbers, mlist) @@ -158,13 +158,13 @@ def test_subheader_size(self): assert_equal(self.subhdr_class._subhdrdtype.itemsize, 510) def test_subheader(self): - assert_equal(self.subhdr.get_shape() , (10,10,3)) - assert_equal(self.subhdr.get_nframes() , 1) + assert_equal(self.subhdr.get_shape(), (10, 10, 3)) + assert_equal(self.subhdr.get_nframes(), 1) assert_equal(self.subhdr.get_nframes(), len(self.subhdr.subheaders)) assert_equal(self.subhdr._check_affines(), True) assert_array_almost_equal(np.diag(self.subhdr.get_frame_affine()), - np.array([ 2.20241979, 2.20241979, 3.125, 1.])) + np.array([2.20241979, 2.20241979, 3.125, 1.])) assert_equal(self.subhdr.get_zooms()[0], 2.20241978764534) assert_equal(self.subhdr.get_zooms()[2], 3.125) assert_equal(self.subhdr._get_data_dtype(0), np.int16) @@ -173,7 +173,7 @@ def test_subheader(self): dat = self.subhdr.raw_data_from_fileobj() assert_equal(dat.shape, self.subhdr.get_shape()) scale_factor = self.subhdr.subheaders[0]['scale_factor'] - assert_equal(self.subhdr.subheaders[0]['scale_factor'].item(),1.0) + assert_equal(self.subhdr.subheaders[0]['scale_factor'].item(), 1.0) ecat_calib_factor = self.hdr['ecat_calibration_factor'] assert_equal(ecat_calib_factor, 25007614.0) @@ -203,7 +203,7 @@ def test_data(self): dat = self.img.get_data() assert_equal(dat.shape, self.img.shape) frame = self.img.get_frame(0) - assert_array_equal(frame, dat[:,:,:,0]) + assert_array_equal(frame, dat[:, :, :, 0]) def test_array_proxy(self): # Get the cached data copy @@ -235,7 +235,7 @@ def test_isolation(self): self.img.get_mlist()) img = img_klass(arr, aff, hdr, sub_hdr, mlist) assert_array_equal(img.affine, aff) - aff[0,0] = 99 + aff[0, 0] = 99 assert_false(np.all(img.affine == aff)) def test_float_affine(self): @@ -254,9 +254,9 @@ def test_float_affine(self): def test_data_regression(self): # Test whether data read has changed since 1.3.0 # These values came from reading the example image using nibabel 1.3.0 - vals = dict(max = 248750736458.0, - min = 1125342630.0, - mean = 117907565661.46666) + vals = dict(max=248750736458.0, + min=1125342630.0, + mean=117907565661.46666) data = self.img.get_data() assert_equal(data.max(), vals['max']) assert_equal(data.min(), vals['min']) diff --git a/nibabel/tests/test_ecat_data.py b/nibabel/tests/test_ecat_data.py index 3f8ae1ab12..f0c9d70b3e 100644 --- 
a/nibabel/tests/test_ecat_data.py +++ b/nibabel/tests/test_ecat_data.py @@ -27,13 +27,13 @@ class TestNegatives(object): opener = staticmethod(load) example_params = dict( - fname = os.path.join(ECAT_TEST_PATH, 'ECAT7_testcaste_neg_values.v'), - shape = (256, 256, 63, 1), - type = np.int16, + fname=os.path.join(ECAT_TEST_PATH, 'ECAT7_testcaste_neg_values.v'), + shape=(256, 256, 63, 1), + type=np.int16, # These values from freec64 - min = -0.00061576, - max = 0.19215, - mean = 0.04933, + min=-0.00061576, + max=0.19215, + mean=0.04933, # unit: 1/cm ) diff --git a/nibabel/tests/test_endiancodes.py b/nibabel/tests/test_endiancodes.py index 52361b17c6..0e821d1f95 100644 --- a/nibabel/tests/test_endiancodes.py +++ b/nibabel/tests/test_endiancodes.py @@ -16,6 +16,7 @@ from ..volumeutils import (endian_codes, native_code, swapped_code) + def test_native_swapped(): native_is_le = sys.byteorder == 'little' if native_is_le: diff --git a/nibabel/tests/test_euler.py b/nibabel/tests/test_euler.py index 135127b581..269c14d475 100644 --- a/nibabel/tests/test_euler.py +++ b/nibabel/tests/test_euler.py @@ -23,7 +23,7 @@ # Example rotations ''' eg_rots = [] -params = np.arange(-pi*2,pi*2.5,pi/2) +params = np.arange(-pi * 2, pi * 2.5, pi / 2) for x in params: for y in params: for z in params: @@ -52,9 +52,9 @@ def z_only(z): cosz = np.cos(z) sinz = np.sin(z) return np.array( - [[cosz, -sinz, 0], - [sinz, cosz, 0], - [0, 0, 1]]) + [[cosz, -sinz, 0], + [sinz, cosz, 0], + [0, 0, 1]]) def sympy_euler(z, y, x): @@ -63,10 +63,12 @@ def sympy_euler(z, y, x): sin = math.sin # the following copy / pasted from Sympy - see derivations subdirectory return [ - [ cos(y)*cos(z), -cos(y)*sin(z), sin(y)], - [cos(x)*sin(z) + cos(z)*sin(x)*sin(y), cos(x)*cos(z) - sin(x)*sin(y)*sin(z), -cos(y)*sin(x)], - [sin(x)*sin(z) - cos(x)*cos(z)*sin(y), cos(z)*sin(x) + cos(x)*sin(y)*sin(z), cos(x)*cos(y)] - ] + [cos(y) * cos(z), -cos(y) * sin(z), sin(y)], + [cos(x) * sin(z) + cos(z) * sin(x) * sin(y), cos(x) * + cos(z) - sin(x) * sin(y) * sin(z), -cos(y) * sin(x)], + [sin(x) * sin(z) - cos(x) * cos(z) * sin(y), cos(z) * + sin(x) + cos(x) * sin(y) * sin(z), cos(x) * cos(y)] + ] def is_valid_rotation(M): @@ -100,9 +102,9 @@ def test_basic_euler(): # Applying an opposite rotation same as inverse (the inverse is # the same as the transpose, but just for clarity) yield assert_true, np.allclose(nea.euler2mat(x=-xr), - np.linalg.inv(nea.euler2mat(x=xr))) + np.linalg.inv(nea.euler2mat(x=xr))) + - def test_euler_mat(): M = nea.euler2mat() yield assert_array_equal, M, np.eye(3) @@ -125,10 +127,10 @@ def sympy_euler2quat(z=0, y=0, x=0): cos = math.cos sin = math.sin # the following copy / pasted from Sympy output - return (cos(0.5*x)*cos(0.5*y)*cos(0.5*z) - sin(0.5*x)*sin(0.5*y)*sin(0.5*z), - cos(0.5*x)*sin(0.5*y)*sin(0.5*z) + cos(0.5*y)*cos(0.5*z)*sin(0.5*x), - cos(0.5*x)*cos(0.5*z)*sin(0.5*y) - cos(0.5*y)*sin(0.5*x)*sin(0.5*z), - cos(0.5*x)*cos(0.5*y)*sin(0.5*z) + cos(0.5*z)*sin(0.5*x)*sin(0.5*y)) + return (cos(0.5 * x) * cos(0.5 * y) * cos(0.5 * z) - sin(0.5 * x) * sin(0.5 * y) * sin(0.5 * z), + cos(0.5 * x) * sin(0.5 * y) * sin(0.5 * z) + cos(0.5 * y) * cos(0.5 * z) * sin(0.5 * x), + cos(0.5 * x) * cos(0.5 * z) * sin(0.5 * y) - cos(0.5 * y) * sin(0.5 * x) * sin(0.5 * z), + cos(0.5 * x) * cos(0.5 * y) * sin(0.5 * z) + cos(0.5 * z) * sin(0.5 * x) * sin(0.5 * y)) def crude_mat2euler(M): @@ -169,4 +171,3 @@ def test_quats(): # same rotation matrix M2 = nea.euler2mat(zp, yp, xp) yield assert_array_almost_equal, M1, M2 - diff --git 
a/nibabel/tests/test_filehandles.py b/nibabel/tests/test_filehandles.py index 2ecadf5840..9c8befeb8d 100644 --- a/nibabel/tests/test_filehandles.py +++ b/nibabel/tests/test_filehandles.py @@ -34,7 +34,7 @@ def test_multiload(): if N > 5000: warn('It would take too long to test file handles, aborting') return - arr = np.arange(24).reshape((2,3,4)) + arr = np.arange(24).reshape((2, 3, 4)) img = Nifti1Image(arr, np.eye(4)) imgs = [] try: diff --git a/nibabel/tests/test_filename_parser.py b/nibabel/tests/test_filename_parser.py index 0c582b4201..d8cb550f60 100644 --- a/nibabel/tests/test_filename_parser.py +++ b/nibabel/tests/test_filename_parser.py @@ -29,7 +29,7 @@ def test_filenames(): types_exts) # If not enforcing extensions, it does the best job it can, # assuming the passed filename is for the first type (in this case - # 'image') + # 'image') tfns = types_filenames('test.funny', types_exts, enforce_extensions=False) assert_equal(tfns, @@ -37,7 +37,7 @@ 'image': 'test.funny'}) # .gz and .bz2 suffixes to extensions, by default, are removed # before extension checking etc, and then put back onto every - # returned filename. + # returned filename. tfns = types_filenames('test.img.gz', types_exts) assert_equal(tfns, {'header': 'test.hdr.gz', @@ -60,7 +60,7 @@ assert_equal(tfns, {'header': 'test.img.hdr', 'image': 'test.img.gz'}) - # the suffixes we remove and replace can be any suffixes. + # the suffixes we remove and replace can be any suffixes. tfns = types_filenames('test.img.bzr', types_exts, ('.bzr',)) assert_equal(tfns, {'header': 'test.hdr.bzr', @@ -107,7 +107,7 @@ def test_filenames(): def test_parse_filename(): - types_exts = (('t1', 'ext1'),('t2', 'ext2')) + types_exts = (('t1', 'ext1'), ('t2', 'ext2')) exp_in_outs = ( (('/path/fname.funny', ()), ('/path/fname', '.funny', None, None)), @@ -131,19 +131,19 @@ # test case sensitivity res = parse_filename('/path/fnameext2.GZ', types_exts, - ('.gz',), False) # case insensitive again + ('.gz',), False) # case insensitive again assert_equal(res, ('/path/fname', 'ext2', '.GZ', 't2')) res = parse_filename('/path/fnameext2.GZ', types_exts, - ('.gz',), True) # case sensitive + ('.gz',), True) # case sensitive assert_equal(res, ('/path/fnameext2', '.GZ', None, None)) res = parse_filename('/path/fnameEXT2.gz', types_exts, - ('.gz',), False) # case insensitive + ('.gz',), False) # case insensitive assert_equal(res, ('/path/fname', 'EXT2', '.gz', 't2')) res = parse_filename('/path/fnameEXT2.gz', types_exts, - ('.gz',), True) # case sensitive + ('.gz',), True) # case sensitive assert_equal(res, ('/path/fnameEXT2', '', '.gz', None)) @@ -159,5 +159,3 @@ def test_splitext_addext(): # case sensitive res = splitext_addext('fname.ext.FOO', ('.foo', '.bar'), True) assert_equal(res, ('fname.ext', '.FOO', '')) - - diff --git a/nibabel/tests/test_files_interface.py b/nibabel/tests/test_files_interface.py index 418cf66763..8470252b6e 100644 --- a/nibabel/tests/test_files_interface.py +++ b/nibabel/tests/test_files_interface.py @@ -24,7 +24,7 @@ def test_files_spatialimages(): # test files creation in image classes - arr = np.zeros((2,3,4)) + arr = np.zeros((2, 3, 4)) aff = np.eye(4) klasses = [klass for klass in all_image_classes if klass.rw and issubclass(klass, SpatialImage)] @@ -51,7 +51,7 @@ def test_files_spatialimages(): def test_files_interface(): # test high-level interface to files mapping - arr = np.zeros((2,3,4)) + arr = np.zeros((2, 3, 4)) aff = np.eye(4) img =
Nifti1Image(arr, aff) # single image @@ -68,7 +68,7 @@ def test_files_interface(): # fileobjs - single image img = Nifti1Image(arr, aff) img.file_map['image'].fileobj = BytesIO() - img.to_file_map() # saves to files + img.to_file_map() # saves to files img2 = Nifti1Image.from_file_map(img.file_map) # img still has correct data assert_array_equal(img2.get_data(), img.get_data()) @@ -78,7 +78,7 @@ def test_files_interface(): # no header yet assert_raises(FileHolderError, img.to_file_map) img.file_map['header'].fileobj = BytesIO() - img.to_file_map() # saves to files + img.to_file_map() # saves to files img2 = Nifti1Pair.from_file_map(img.file_map) # img still has correct data assert_array_equal(img2.get_data(), img.get_data()) diff --git a/nibabel/tests/test_fileslice.py b/nibabel/tests/test_fileslice.py index 7ff86c61d0..f4af7b3d90 100644 --- a/nibabel/tests/test_fileslice.py +++ b/nibabel/tests/test_fileslice.py @@ -27,7 +27,7 @@ def _check_slice(sliceobj): a = np.arange(100).reshape((10, 10)) b = a[sliceobj] if np.isscalar(b): - return # Can't check + return # Can't check # Check if this is a view a[:] = 99 b_is_view = np.all(b == 99) @@ -38,7 +38,7 @@ def test_is_fancy(): slices = (2, [2], [2, 3], Ellipsis, np.array(2), np.array((2, 3))) for slice0 in slices: _check_slice(slice0) - _check_slice((slice0,)) # tuple is same + _check_slice((slice0,)) # tuple is same for slice1 in slices: _check_slice((slice0, slice1)) assert_false(is_fancy((None,))) @@ -154,14 +154,14 @@ def _slices_for_len(L): L - 1, -1, slice(None), - slice(L-1)] + slice(L - 1)] if L > 1: sdefs += [ -2, - slice(1, L-1), - slice(1, L-1, 2), - slice(L-1, 1, -1), - slice(L-1, 1, -2)] + slice(1, L - 1), + slice(1, L - 1, 2), + slice(L - 1, 1, -1), + slice(L - 1, 1, -2)] return tuple(sdefs) @@ -278,8 +278,12 @@ def test_threshold_heuristic(): # Some dummy heuristics for optimize_slicer def _always(slicer, dim_len, stride): return 'full' + + def _partial(slicer, dim_len, stride): return 'contiguous' + + def _never(slicer, dim_len, stride): return None @@ -383,31 +387,31 @@ def test_optimize_slicer(): (slice(2, 8, 1), slice(None, None, 2))) # If this is the slowest changing dimension, heuristic can upgrade None to # contiguous, but not (None, contiguous) to full - assert_equal( # we've done this one already + assert_equal( # we've done this one already optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _always), (slice(None), slice(0, 10, 2))) - assert_equal( # if slowest, just upgrade to contiguous + assert_equal( # if slowest, just upgrade to contiguous optimize_slicer(slice(0, 10, 2), 10, True, True, 4, _always), (slice(0, 10, 1), slice(None, None, 2))) - assert_equal( # contiguous does not upgrade to full + assert_equal( # contiguous does not upgrade to full optimize_slicer(slice(9), 10, True, True, 4, _always), (slice(0, 9, 1), slice(None))) # integer assert_equal( optimize_slicer(0, 10, True, False, 4, _never), (0, 'dropped')) - assert_equal( # can be negative + assert_equal( # can be negative optimize_slicer(-1, 10, True, False, 4, _never), (9, 'dropped')) - assert_equal( # or float + assert_equal( # or float optimize_slicer(0.9, 10, True, False, 4, _never), (0, 'dropped')) - assert_raises(ValueError, # should never get 'contiguous' - optimize_slicer, 0, 10, True, False, 4, _partial) - assert_equal( # full can be forced with heuristic + assert_raises(ValueError, # should never get 'contiguous' + optimize_slicer, 0, 10, True, False, 4, _partial) + assert_equal( # full can be forced with heuristic optimize_slicer(0, 10, 
True, False, 4, _always), (slice(None), 0)) - assert_equal( # but disabled for slowest changing dimension + assert_equal( # but disabled for slowest changing dimension optimize_slicer(0, 10, True, True, 4, _always), (0, 'dropped')) @@ -485,21 +489,21 @@ def test_optimize_read_slicers(): assert_equal(optimize_read_slicers( (slice(9), slice(None), slice(None)), (10, 6, 2), 4, _depends0), ((slice(None), slice(None), slice(None)), - (slice(0, 9, 1), slice(None), slice(None)))) + (slice(0, 9, 1), slice(None), slice(None)))) assert_equal(optimize_read_slicers( (slice(None), slice(5), slice(None)), (10, 6, 2), 4, _depends0), ((slice(None), slice(0, 5, 1), slice(None)), - (slice(None), slice(None), slice(None)))) + (slice(None), slice(None), slice(None)))) assert_equal(optimize_read_slicers( (slice(None), slice(5), slice(None)), (10, 6, 2), 4, _depends1), ((slice(None), slice(None), slice(None)), - (slice(None), slice(0, 5, 1), slice(None)))) + (slice(None), slice(0, 5, 1), slice(None)))) # Check longs as integer slices sn = slice(None) assert_equal(optimize_read_slicers( (1, 2, 3), (2, 3, 4), 4, _always), ((sn, sn, 3), (1, 2))) - if PY2: # Check we can pass in longs as well + if PY2: # Check we can pass in longs as well assert_equal(optimize_read_slicers( (long(1), long(2), long(3)), (2, 3, 4), 4, _always), ((sn, sn, 3), (1, 2))) @@ -516,7 +520,7 @@ def test_slicers2segments(): assert_equal(slicers2segments((slice(None),), (10,), 7, 4), [[7, 10 * 4]]) assert_equal(slicers2segments((0, slice(None)), (10, 6), 7, 4), - [[7 + 10*4*i, 4] for i in range(6)]) + [[7 + 10 * 4 * i, 4] for i in range(6)]) assert_equal(slicers2segments((slice(None), 0), (10, 6), 7, 4), [[7, 10 * 4]]) assert_equal(slicers2segments((slice(None), slice(None)), (10, 6), 7, 4), @@ -524,7 +528,7 @@ def test_slicers2segments(): assert_equal(slicers2segments( (slice(None), slice(None), 2), (10, 6, 4), 7, 4), [[7 + 10 * 6 * 2 * 4, 10 * 6 * 4]]) - if PY2: # Check we can pass longs on Python 2 + if PY2: # Check we can pass longs on Python 2 assert_equal( slicers2segments((long(0), long(1), long(2)), (10, 6, 4), 7, 4), [[7 + 10 * 4 + 10 * 6 * 2 * 4, 4]]) @@ -543,52 +547,52 @@ def test_calc_slicedefs(): ([[7, 40]], (10,), (), - )) + )) assert_equal( calc_slicedefs((slice(9),), (10,), 4, 7, 'F', _never), ([[7, 36]], (9,), (), - )) + )) assert_equal( calc_slicedefs((slice(1, 9),), (10,), 4, 7, 'F', _never), ([[11, 32]], (8,), (), - )) + )) # Two dimensions, single slice assert_equal( calc_slicedefs((0,), (10, 6), 4, 7, 'F', _never), ([[7, 4], [47, 4], [87, 4], [127, 4], [167, 4], [207, 4]], (6,), (), - )) + )) assert_equal( calc_slicedefs((0,), (10, 6), 4, 7, 'C', _never), ([[7, 6 * 4]], (6,), (), - )) + )) # Two dimensions, contiguous not full assert_equal( calc_slicedefs((1, slice(1, 5)), (10, 6), 4, 7, 'F', _never), ([[51, 4], [91, 4], [131, 4], [171, 4]], (4,), (), - )) + )) assert_equal( calc_slicedefs((1, slice(1, 5)), (10, 6), 4, 7, 'C', _never), - ([[7 + 7*4, 16]], + ([[7 + 7 * 4, 16]], (4,), (), - )) + )) # With full slice first assert_equal( calc_slicedefs((slice(None), slice(1, 5)), (10, 6), 4, 7, 'F', _never), ([[47, 160]], (10, 4), (), - )) + )) # Check effect of heuristic on calc_slicedefs # Even integer slices can generate full when heuristic says so assert_equal( @@ -596,14 +600,14 @@ def test_calc_slicedefs(): ([[7, 10 * 6 * 4]], (10, 6), (1, slice(None)), - )) + )) # Except when last assert_equal( calc_slicedefs((slice(None), 1), (10, 6), 4, 7, 'F', _always), ([[7 + 10 * 4, 10 * 4]], (10,), (), - )) + )) def 
test_predict_shape(): @@ -631,8 +635,8 @@ def test_predict_shape(): def test_strided_scalar(): # Utility to make numpy array of given shape from scalar using striding for shape, scalar in product( - ((2,), (2, 3,), (2, 3, 4)), - (1, 2, np.int16(3))): + ((2,), (2, 3,), (2, 3, 4)), + (1, 2, np.int16(3))): expected = np.zeros(shape, dtype=np.array(scalar).dtype) + scalar observed = strided_scalar(shape, scalar) assert_array_equal(observed, expected) @@ -643,6 +647,7 @@ def test_strided_scalar(): # This addresses a numpy 1.10 breakage of broadcasting a strided # array without resizing (see GitHub PR #358) assert_false(observed.flags.writeable) + def setval(x): x[..., 0] = 99 # RuntimeError for numpy < 1.10 diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index 625de66f66..8f6d998a0c 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -18,7 +18,7 @@ IEEE_floats = [np.float32, np.float64] try: np.float16 -except AttributeError: # float16 not present in np < 1.6 +except AttributeError: # float16 not present in np < 1.6 have_float16 = False else: have_float16 = True @@ -57,15 +57,15 @@ def test_type_info(): vals = (info.nmant, info.nexp, width) # Information for PPC head / tail doubles from: # https://developer.apple.com/library/mac/#documentation/Darwin/Reference/Manpages/man3/float.3.html - if vals in ((52, 11, 8), # longdouble is same as double - (63, 15, 12), (63, 15, 16), # intel 80 bit - (112, 15, 16), # real float128 - (106, 11, 16)): # PPC head, tail doubles, expected values + if vals in ((52, 11, 8), # longdouble is same as double + (63, 15, 12), (63, 15, 16), # intel 80 bit + (112, 15, 16), # real float128 + (106, 11, 16)): # PPC head, tail doubles, expected values assert_equal(dict(min=info.min, max=info.max, minexp=info.minexp, maxexp=info.maxexp, nexp=info.nexp, nmant=info.nmant, width=width), infod) - elif vals == (1, 1, 16): # bust info for PPC head / tail longdoubles + elif vals == (1, 1, 16): # bust info for PPC head / tail longdoubles assert_equal(dict(min=dbl_info.min, max=dbl_info.max, minexp=-1022, maxexp=1024, nexp=11, nmant=106, width=16), @@ -101,7 +101,7 @@ def test_check_nmant_nexp(): # Check against type_info for t in ok_floats(): ti = type_info(t) - if ti['nmant'] != 106: # This check does not work for PPC double pair + if ti['nmant'] != 106: # This check does not work for PPC double pair assert_true(_check_nmant(t, ti['nmant'])) assert_true(_check_maxexp(t, ti['maxexp'])) @@ -122,9 +122,9 @@ def test_as_int(): try: nmant = type_info(np.longdouble)['nmant'] except FloatingError: - nmant = 63 # Unknown precision, let's hope it's at least 63 + nmant = 63 # Unknown precision, let's hope it's at least 63 v = np.longdouble(2) ** (nmant + 1) - 1 - assert_equal(as_int(v), 2**(nmant + 1) -1) + assert_equal(as_int(v), 2**(nmant + 1) - 1) # Check for predictable overflow nexp64 = floor_log2(type_info(np.float64)['max']) with np.errstate(over='ignore'): @@ -139,13 +139,13 @@ def test_int_to_float(): for ie3 in IEEE_floats: nmant = type_info(ie3)['nmant'] for p in range(nmant + 3): - i = 2**p+1 + i = 2**p + 1 assert_equal(int_to_float(i, ie3), ie3(i)) assert_equal(int_to_float(-i, ie3), ie3(-i)) # IEEEs in this case are binary formats only nexp = floor_log2(type_info(ie3)['max']) # Values too large for the format - smn, smx = -2**(nexp+1), 2**(nexp+1) + smn, smx = -2**(nexp + 1), 2**(nexp + 1) if ie3 is np.float64: assert_raises(OverflowError, int_to_float, smn, ie3) assert_raises(OverflowError, int_to_float, smx, ie3) @@ 
-157,23 +157,23 @@ def test_int_to_float(): # up to integer precision of float64 nmant, we get the same result as for # casting directly nmant = type_info(np.float64)['nmant'] - for p in range(nmant+2): # implicit - i = 2**p-1 + for p in range(nmant + 2): # implicit + i = 2**p - 1 assert_equal(int_to_float(i, LD), LD(i)) assert_equal(int_to_float(-i, LD), LD(-i)) # Above max of float64, we're hosed nexp64 = floor_log2(type_info(np.float64)['max']) - smn64, smx64 = -2**(nexp64+1), 2**(nexp64+1) + smn64, smx64 = -2**(nexp64 + 1), 2**(nexp64 + 1) # The algorithm here implemented goes through float64, so supermax and # supermin will cause overflow errors assert_raises(OverflowError, int_to_float, smn64, LD) assert_raises(OverflowError, int_to_float, smx64, LD) try: nmant = type_info(np.longdouble)['nmant'] - except FloatingError: # don't know where to test + except FloatingError: # don't know where to test return # test we recover precision just above nmant - i = 2**(nmant+1)-1 + i = 2**(nmant + 1) - 1 assert_equal(as_int(int_to_float(i, LD)), i) assert_equal(as_int(int_to_float(-i, LD)), -i) # Test no error for longs @@ -215,8 +215,8 @@ def test_floor_exact_64(): assert_equal(len(gaps), 1) gap = gaps.pop() assert_equal(gap, int(gap)) - test_val = 2**(e+1)-1 - assert_equal(floor_exact(test_val, np.float64), 2**(e+1)-int(gap)) + test_val = 2**(e + 1) - 1 + assert_equal(floor_exact(test_val, np.float64), 2**(e + 1) - int(gap)) def test_floor_exact(): @@ -230,8 +230,8 @@ def test_floor_exact(): to_test.append(np.longdouble) # When numbers go above int64 - I believe, numpy comparisons break down, # so we have to cast to int before comparison - int_flex = lambda x, t : as_int(floor_exact(x, t)) - int_ceex = lambda x, t : as_int(ceil_exact(x, t)) + int_flex = lambda x, t: as_int(floor_exact(x, t)) + int_ceex = lambda x, t: as_int(ceil_exact(x, t)) for t in to_test: # A number bigger than the range returns the max info = type_info(t) @@ -241,47 +241,47 @@ def test_floor_exact(): assert_equal(floor_exact(-2**5000, t), -np.inf) assert_equal(ceil_exact(-2**5000, t), -np.inf) # Check around end of integer precision - nmant = info['nmant'] - for i in range(nmant+1): + nmant = info['nmant'] + for i in range(nmant + 1): iv = 2**i # up to 2**nmant should be exactly representable for func in (int_flex, int_ceex): assert_equal(func(iv, t), iv) assert_equal(func(-iv, t), -iv) - assert_equal(func(iv-1, t), iv-1) - assert_equal(func(-iv+1, t), -iv+1) + assert_equal(func(iv - 1, t), iv - 1) + assert_equal(func(-iv + 1, t), -iv + 1) if t is np.longdouble and ( - on_powerpc() or - longdouble_precision_improved()): + on_powerpc() or + longdouble_precision_improved()): # The nmant value for longdouble on PPC appears to be conservative, # so that the tests for behavior above the nmant range fail. # windows longdouble can change from float64 to Intel80 in some # situations, in which case nmant will not be correct continue # Confirm to ourselves that 2**(nmant+1) can't be exactly represented - iv = 2**(nmant+1) - assert_equal(int_flex(iv+1, t), iv) - assert_equal(int_ceex(iv+1, t), iv+2) + iv = 2**(nmant + 1) + assert_equal(int_flex(iv + 1, t), iv) + assert_equal(int_ceex(iv + 1, t), iv + 2) # negatives - assert_equal(int_flex(-iv-1, t), -iv-2) - assert_equal(int_ceex(-iv-1, t), -iv) + assert_equal(int_flex(-iv - 1, t), -iv - 2) + assert_equal(int_ceex(-iv - 1, t), -iv) # The gap in representable numbers is 2 above 2**(nmant+1), 4 above # 2**(nmant+2), and so on. 
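# A quick worked sketch of the gap comment above, assuming standard IEEE-754
# float64 (nmant == 52), so the spacing of representable values is 2 just
# above 2**53 and 4 just above 2**54; the values below are general floating
# point facts used for illustration, not something taken from this hunk.
import numpy as np
big = np.float64(2 ** 53)
assert big + 1 == big                                  # 1 falls inside the gap and rounds away
assert big + 2 == np.float64(2 ** 53 + 2)              # 2 is exactly the gap, so it survives
assert np.float64(2 ** 54) + 2 == np.float64(2 ** 54)  # one power higher, the gap doubles to 4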
for i in range(5): - iv = 2**(nmant+1+i) - gap = 2**(i+1) - assert_equal(as_int(t(iv) + t(gap)), iv+gap) + iv = 2**(nmant + 1 + i) + gap = 2**(i + 1) + assert_equal(as_int(t(iv) + t(gap)), iv + gap) for j in range(1, gap): - assert_equal(int_flex(iv+j, t), iv) - assert_equal(int_flex(iv+gap+j, t), iv+gap) - assert_equal(int_ceex(iv+j, t), iv+gap) - assert_equal(int_ceex(iv+gap+j, t), iv+2*gap) + assert_equal(int_flex(iv + j, t), iv) + assert_equal(int_flex(iv + gap + j, t), iv + gap) + assert_equal(int_ceex(iv + j, t), iv + gap) + assert_equal(int_ceex(iv + gap + j, t), iv + 2 * gap) # negatives for j in range(1, gap): - assert_equal(int_flex(-iv-j, t), -iv-gap) - assert_equal(int_flex(-iv-gap-j, t), -iv-2*gap) - assert_equal(int_ceex(-iv-j, t), -iv) - assert_equal(int_ceex(-iv-gap-j, t), -iv-gap) + assert_equal(int_flex(-iv - j, t), -iv - gap) + assert_equal(int_flex(-iv - gap - j, t), -iv - 2 * gap) + assert_equal(int_ceex(-iv - j, t), -iv) + assert_equal(int_ceex(-iv - gap - j, t), -iv - gap) def test_usable_binary128(): diff --git a/nibabel/tests/test_funcs.py b/nibabel/tests/test_funcs.py index 20d11578b3..1841dbdd9a 100644 --- a/nibabel/tests/test_funcs.py +++ b/nibabel/tests/test_funcs.py @@ -21,6 +21,8 @@ from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) _counter = 0 + + def _as_fname(img): global _counter fname = 'img%3d.nii' % _counter @@ -55,10 +57,10 @@ def test_concat(): data1_numel = np.asarray(data1_shape).prod() data1 = np.arange(data1_numel).reshape(data1_shape) img1_mem = Nifti1Image(data1, affine) - img2_mem = Nifti1Image(data1, affine+1) # bad affine + img2_mem = Nifti1Image(data1, affine + 1) # bad affine # Loop over every possible axis, including None (explicit and implied) - for axis in (list(range(-(dim-2), (dim-1))) + [None, '__default__']): + for axis in (list(range(-(dim - 2), (dim - 1))) + [None, '__default__']): # Allow testing default vs. 
passing explicit param if axis == '__default__': @@ -104,12 +106,14 @@ def test_concat(): except ValueError as ve: assert_true(expect_error, str(ve)) else: - assert_false(expect_error, "Expected a concatenation error, but got none.") + assert_false( + expect_error, "Expected a concatenation error, but got none.") assert_array_equal(all_imgs.get_data(), all_data) assert_array_equal(all_imgs.affine, affine) # check that not-matching affines raise error - assert_raises(ValueError, concat_images, [img0, img2], **concat_imgs_kwargs) + assert_raises(ValueError, concat_images, [ + img0, img2], **concat_imgs_kwargs) # except if check_affines is False try: @@ -117,19 +121,20 @@ def test_concat(): except ValueError as ve: assert_true(expect_error, str(ve)) else: - assert_false(expect_error, "Expected a concatenation error, but got none.") + assert_false( + expect_error, "Expected a concatenation error, but got none.") assert_array_equal(all_imgs.get_data(), all_data) assert_array_equal(all_imgs.affine, affine) def test_closest_canonical(): - arr = np.arange(24).reshape((2,3,4,1)) + arr = np.arange(24).reshape((2, 3, 4, 1)) # no funky stuff, returns same thing img = Nifti1Image(arr, np.eye(4)) xyz_img = as_closest_canonical(img) assert_true(img is xyz_img) # a axis flip - img = Nifti1Image(arr, np.diag([-1,1,1,1])) + img = Nifti1Image(arr, np.diag([-1, 1, 1, 1])) xyz_img = as_closest_canonical(img) assert_false(img is xyz_img) out_arr = xyz_img.get_data() @@ -138,7 +143,7 @@ def test_closest_canonical(): xyz_img = as_closest_canonical(img, True) # but there is if the affine is not diagonal aff = np.eye(4) - aff[0,1] = 0.1 + aff[0, 1] = 0.1 # although it's more or less canonical already img = Nifti1Image(arr, aff) xyz_img = as_closest_canonical(img) diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index 46fbe123c3..908109f151 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -122,7 +122,7 @@ def validate_affine_deprecated(self, imaker, params): def validate_header(self, imaker, params): # Check header API img = imaker() - hdr = img.header # we can fetch it + hdr = img.header # we can fetch it # Change shape in header, check this changes img.header shape = hdr.get_data_shape() new_shape = (shape[0] + 1,) + shape[1:] @@ -164,7 +164,7 @@ def validate_dtype(self, imaker, params): rt_img = bytesio_round_trip(img) assert_equal(rt_img.get_data_dtype().type, params['dtype']) # Setting to a different dtype - img.set_data_dtype(np.float32) # assumed supported for all formats + img.set_data_dtype(np.float32) # assumed supported for all formats assert_equal(img.get_data_dtype().type, np.float32) # dtype survives round trip if self.can_save: @@ -214,7 +214,7 @@ def validate_data(self, imaker, params): assert_true(img.in_memory) data_again = img.get_data() assert_true(data is data_again) - else: # not proxy + else: # not proxy for caching in (None, 'fill', 'unchanged'): img = imaker() get_data_func = (img.get_data if caching is None else @@ -256,7 +256,7 @@ def validate_filenames(self, imaker, params): if not self.can_save: raise SkipTest img = imaker() - img.set_data_dtype(np.float32) # to avoid rounding in load / save + img.set_data_dtype(np.float32) # to avoid rounding in load / save # The bytesio_round_trip helper tests bytesio load / save via file_map rt_img = bytesio_round_trip(img) assert_array_equal(img.shape, rt_img.shape) @@ -273,7 +273,7 @@ def validate_filenames(self, imaker, params): rt_img = img.__class__.from_filename(fname) 
assert_array_equal(img.shape, rt_img.shape) assert_almost_equal(img.get_data(), rt_img.get_data()) - del rt_img # to allow windows to delete the directory + del rt_img # to allow windows to delete the directory def validate_no_slicing(self, imaker, params): img = imaker() @@ -292,7 +292,7 @@ class LoadImageAPI(GenericImageAPI): def obj_params(self): for img_params in self.example_images: - yield lambda : self.loader(img_params['fname']), img_params + yield lambda: self.loader(img_params['fname']), img_params def validate_path_maybe_image(self, imaker, params): for img_params in self.example_images: @@ -322,8 +322,9 @@ def obj_params(self): yield func, params # Create a new images aff = np.diag([1, 2, 3, 1]) + def make_imaker(arr, aff, header=None): - return lambda : self.image_maker(arr, aff, header) + return lambda: self.image_maker(arr, aff, header) for shape in self.example_shapes: for dtype in (np.uint8, np.int16, np.float32): arr = np.arange(np.prod(shape), dtype=np.float32).reshape(shape) @@ -331,17 +332,18 @@ def make_imaker(arr, aff, header=None): hdr.set_data_dtype(dtype) func = make_imaker(arr.copy(), aff, hdr) params = dict( - dtype = dtype, - affine = aff, - data = arr, - shape = shape, - is_proxy = False) + dtype=dtype, + affine=aff, + data=arr, + shape=shape, + is_proxy=False) yield func, params if not self.can_save: return # Add a proxy image # We assume that loading from a fileobj creates a proxy image params['is_proxy'] = True + def prox_imaker(): img = self.image_maker(arr, aff, hdr) rt_img = bytesio_round_trip(img) @@ -407,6 +409,7 @@ class TestMinc1API(ImageHeaderAPI): class TestMinc2API(TestMinc1API): + def __init__(self): if not have_h5py: raise SkipTest('Need h5py for these tests') @@ -417,6 +420,7 @@ def __init__(self): class TestPARRECAPI(LoadImageAPI): + def loader(self, fname): return parrec.load(fname) @@ -434,7 +438,7 @@ def loader(self, fname): class TestMGHAPI(ImageHeaderAPI): klass = image_maker = MGHImage - example_shapes = ((2, 3, 4), (2, 3, 4, 5)) # MGH can only do >= 3D + example_shapes = ((2, 3, 4), (2, 3, 4, 5)) # MGH can only do >= 3D has_scaling = True can_save = True standard_extension = '.mgh' diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index dbaf76a272..d43d1ee581 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -111,7 +111,7 @@ def test_save_load(): npt = np.float32 data = np.arange(np.prod(shape), dtype=npt).reshape(shape) affine = np.diag([1, 2, 3, 1]) - affine[:3,3] = [3,2,1] + affine[:3, 3] = [3, 2, 1] img = ni1.Nifti1Image(data, affine) img.set_data_dtype(npt) with InTemporaryDirectory() as pth: @@ -124,9 +124,9 @@ def test_save_load(): assert_array_equal(re_img.affine, affine) # These and subsequent del statements are to prevent confusing # windows errors when trying to open files or delete the - # temporary directory. + # temporary directory. 
del re_img - if have_scipy: # skip if we cannot read .mat files + if have_scipy: # skip if we cannot read .mat files spm2.save(img, sifn) re_img2 = nils.load(sifn) assert_true(isinstance(re_img2, spm2.Spm2AnalyzeImage)) @@ -136,7 +136,7 @@ spm99.save(img, sifn) re_img3 = nils.load(sifn) assert_true(isinstance(re_img3, - spm99.Spm99AnalyzeImage)) + spm99.Spm99AnalyzeImage)) assert_array_equal(re_img3.get_data(), data) assert_array_equal(re_img3.affine, affine) ni1.save(re_img3, nifn) @@ -154,7 +154,7 @@ def test_two_to_one(): npt = np.float32 data = np.arange(np.prod(shape), dtype=npt).reshape(shape) affine = np.diag([1, 2, 3, 1]) - affine[:3,3] = [3,2,1] + affine[:3, 3] = [3, 2, 1] # single file format img = ni1.Nifti1Image(data, affine) assert_equal(img.header['magic'], b'n+1') @@ -201,7 +201,7 @@ def test_two_to_one(): def test_negative_load_save(): - shape = (1,2,5) + shape = (1, 2, 5) data = np.arange(10).reshape(shape) - 10.0 affine = np.eye(4) hdr = ni1.Nifti1Header() diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index b6ca7ea938..e72ad6bbbc 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -89,7 +89,7 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, empty=b'', # pass an empty sniff, should query in fn irrelevant=b'a' * (sizeof_hdr - 1), # A too-small sniff, query bad_sniff=b'a' * sizeof_hdr, # Bad sniff, should fail - ).items(): + ).items(): for klass in img_klasses: if klass == expected_img_klass: diff --git a/nibabel/tests/test_keywordonly.py b/nibabel/tests/test_keywordonly.py index bdf662fdc0..ba8bce0d94 100644 --- a/nibabel/tests/test_keywordonly.py +++ b/nibabel/tests/test_keywordonly.py @@ -17,6 +17,7 @@ def func(an_arg): assert_raises(TypeError, dec_func, 1, 2) assert_raises(TypeError, dec_func, 1, akeyarg=3) assert_equal(dec_func.__doc__, 'My docstring') + @kw_only_func(1) def kw_func(an_arg, a_kwarg='thing'): "Another docstring" @@ -26,7 +27,9 @@ def kw_func(an_arg, a_kwarg='thing'): assert_equal(kw_func(1, a_kwarg=2), (1, 2)) assert_raises(TypeError, kw_func, 1, akeyarg=3) assert_equal(kw_func.__doc__, 'Another docstring') + class C(object): + @kw_only_meth(1) def kw_meth(self, an_arg, a_kwarg='thing'): "Method docstring" diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index 4a40ccd344..923252fa1f 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -33,7 +33,7 @@ def test_read_img_data(): 'minc1_4d.mnc', 'test.mgz', 'tiny.mnc' - ): + ): fpath = pjoin(data_path, fname) img = load(fpath) data = img.get_data() diff --git a/nibabel/tests/test_minc1.py b/nibabel/tests/test_minc1.py index 5c332f3e21..0e8d4e8d47 100644 --- a/nibabel/tests/test_minc1.py +++ b/nibabel/tests/test_minc1.py @@ -39,50 +39,50 @@ # item.
EXAMPLE_IMAGES = [ dict( - fname = pjoin(data_path, 'tiny.mnc'), - shape = (10,20,20), - dtype = np.uint8, - affine = np.array([[0, 0, 2.0, -20], - [0, 2.0, 0, -20], - [2.0, 0, 0, -10], - [0, 0, 0, 1]]), - zooms = (2., 2., 2.), + fname=pjoin(data_path, 'tiny.mnc'), + shape=(10, 20, 20), + dtype=np.uint8, + affine=np.array([[0, 0, 2.0, -20], + [0, 2.0, 0, -20], + [2.0, 0, 0, -10], + [0, 0, 0, 1]]), + zooms=(2., 2., 2.), # These values from SPM2 - data_summary = dict( - min = 0.20784314, - max = 0.74901961, - mean = 0.60602819), - is_proxy = True), + data_summary=dict( + min=0.20784314, + max=0.74901961, + mean=0.60602819), + is_proxy=True), dict( - fname = pjoin(data_path, 'minc1_1_scale.mnc'), - shape = (10,20,20), - dtype = np.uint8, - affine = np.array([[0, 0, 2.0, -20], - [0, 2.0, 0, -20], - [2.0, 0, 0, -10], - [0, 0, 0, 1]]), - zooms = (2., 2., 2.), + fname=pjoin(data_path, 'minc1_1_scale.mnc'), + shape=(10, 20, 20), + dtype=np.uint8, + affine=np.array([[0, 0, 2.0, -20], + [0, 2.0, 0, -20], + [2.0, 0, 0, -10], + [0, 0, 0, 1]]), + zooms=(2., 2., 2.), # These values from mincstats - data_summary = dict( - min = 0.2082842439, - max = 0.2094327615, - mean = 0.2091292083), - is_proxy = True), + data_summary=dict( + min=0.2082842439, + max=0.2094327615, + mean=0.2091292083), + is_proxy=True), dict( - fname = pjoin(data_path, 'minc1_4d.mnc'), - shape = (2, 10,20,20), - dtype = np.uint8, - affine = np.array([[0, 0, 2.0, -20], - [0, 2.0, 0, -20], - [2.0, 0, 0, -10], - [0, 0, 0, 1]]), - zooms = (1., 2., 2., 2.), + fname=pjoin(data_path, 'minc1_4d.mnc'), + shape=(2, 10, 20, 20), + dtype=np.uint8, + affine=np.array([[0, 0, 2.0, -20], + [0, 2.0, 0, -20], + [2.0, 0, 0, -10], + [0, 0, 0, 1]]), + zooms=(1., 2., 2., 2.), # These values from mincstats - data_summary = dict( - min = 0.2078431373, - max = 1.498039216, - mean = 0.9090422837), - is_proxy = True), + data_summary=dict( + min=0.2078431373, + max=1.498039216, + mean=0.9090422837), + is_proxy=True), ] @@ -102,7 +102,7 @@ def test_old_namespace(): previous_import = isinstance(minc, types.ModuleType) if not previous_import: assert_true(isinstance(minc, ModuleProxy)) - old_minc1image = minc.Minc1Image # just to check it works + old_minc1image = minc.Minc1Image # just to check it works # There may or may not be a warning raised on accessing the proxy, # depending on whether the minc.py module is already imported in this # test run. @@ -125,7 +125,7 @@ def test_old_namespace(): assert_false(MincFile is Minc1File) assert_equal(warns, []) mf = MincFile(netcdf_file(EG_FNAME)) - assert_equal(mf.get_data_shape(), (10, 20 , 20)) + assert_equal(mf.get_data_shape(), (10, 20, 20)) # Call to create object created warning assert_equal(warns.pop(0).category, FutureWarning) @@ -155,13 +155,13 @@ def test_mincfile_slicing(self): mnc = self.file_class(mnc_obj) data = mnc.get_scaled_data() for slicedef in ((slice(None),), - (1,), - (slice(None), 1), - (1, slice(None)), - (slice(None), 1, 1), - (1, slice(None), 1), - (1, 1, slice(None)), - ): + (1,), + (slice(None), 1), + (1, slice(None)), + (slice(None), 1, 1), + (1, slice(None), 1), + (1, 1, slice(None)), + ): sliced_data = mnc.get_scaled_data(slicedef) assert_array_equal(sliced_data, data[slicedef]) diff --git a/nibabel/tests/test_minc2.py b/nibabel/tests/test_minc2.py index ccada38d2c..3c955a6947 100644 --- a/nibabel/tests/test_minc2.py +++ b/nibabel/tests/test_minc2.py @@ -30,50 +30,50 @@ # item. 
EXAMPLE_IMAGES = [ dict( - fname = pjoin(data_path, 'small.mnc'), - shape = (18, 28, 29), - dtype = np.int16, - affine = np.array([[0, 0, 7.0, -98], - [0, 8.0, 0, -134], - [9.0, 0, 0, -72], - [0, 0, 0, 1]]), - zooms = (9., 8., 7.), + fname=pjoin(data_path, 'small.mnc'), + shape=(18, 28, 29), + dtype=np.int16, + affine=np.array([[0, 0, 7.0, -98], + [0, 8.0, 0, -134], + [9.0, 0, 0, -72], + [0, 0, 0, 1]]), + zooms=(9., 8., 7.), # These values from mincstats - data_summary = dict( - min = 0.1185331417, - max = 92.87690699, - mean = 31.2127952), - is_proxy = True), + data_summary=dict( + min=0.1185331417, + max=92.87690699, + mean=31.2127952), + is_proxy=True), dict( - fname = pjoin(data_path, 'minc2_1_scale.mnc'), - shape = (10,20,20), - dtype = np.uint8, - affine = np.array([[0, 0, 2.0, -20], - [0, 2.0, 0, -20], - [2.0, 0, 0, -10], - [0, 0, 0, 1]]), - zooms = (2., 2., 2.), + fname=pjoin(data_path, 'minc2_1_scale.mnc'), + shape=(10, 20, 20), + dtype=np.uint8, + affine=np.array([[0, 0, 2.0, -20], + [0, 2.0, 0, -20], + [2.0, 0, 0, -10], + [0, 0, 0, 1]]), + zooms=(2., 2., 2.), # These values from mincstats - data_summary = dict( - min = 0.2082842439, - max = 0.2094327615, - mean = 0.2091292083), - is_proxy = True), + data_summary=dict( + min=0.2082842439, + max=0.2094327615, + mean=0.2091292083), + is_proxy=True), dict( - fname = pjoin(data_path, 'minc2_4d.mnc'), - shape = (2, 10,20,20), - dtype = np.uint8, - affine = np.array([[0, 0, 2.0, -20], - [0, 2.0, 0, -20], - [2.0, 0, 0, -10], - [0, 0, 0, 1]]), - zooms = (1., 2., 2., 2.), + fname=pjoin(data_path, 'minc2_4d.mnc'), + shape=(2, 10, 20, 20), + dtype=np.uint8, + affine=np.array([[0, 0, 2.0, -20], + [0, 2.0, 0, -20], + [2.0, 0, 0, -10], + [0, 0, 0, 1]]), + zooms=(1., 2., 2., 2.), # These values from mincstats - data_summary = dict( - min = 0.2078431373, - max = 1.498039216, - mean = 0.9090422837), - is_proxy = True) + data_summary=dict( + min=0.2078431373, + max=1.498039216, + mean=0.9090422837), + is_proxy=True) ] if have_h5py: diff --git a/nibabel/tests/test_minc2_data.py b/nibabel/tests/test_minc2_data.py index 48c2f286a2..1ec4999a43 100644 --- a/nibabel/tests/test_minc2_data.py +++ b/nibabel/tests/test_minc2_data.py @@ -45,17 +45,17 @@ class TestEPIFrame(object): zooms = [-0.8984375, -0.8984375, 3.] 
starts = [117.25609125, 138.89861125, -54.442028] example_params = dict( - fname = os.path.join(MINC2_PATH, 'mincex_EPI-frame.mnc'), - shape = (40, 256, 256), - type = np.int16, - affine = _make_affine((z_cos, y_cos, x_cos), - zooms[::-1], - starts[::-1]), - zooms = [abs(v) for v in zooms[::-1]], + fname=os.path.join(MINC2_PATH, 'mincex_EPI-frame.mnc'), + shape=(40, 256, 256), + type=np.int16, + affine=_make_affine((z_cos, y_cos, x_cos), + zooms[::-1], + starts[::-1]), + zooms=[abs(v) for v in zooms[::-1]], # These values from mincstats - min = 0., - max = 1273, - mean = 93.52085367) + min=0., + max=1273, + mean=93.52085367) @needs_nibabel_data('nitest-minc2') def test_load(self): @@ -87,27 +87,27 @@ class TestB0(TestEPIFrame): zooms = [-0.8984375, -0.8984375, 6.49999990444107] starts = [105.473101260826, 151.74885125, -61.8714747993248] example_params = dict( - fname = os.path.join(MINC2_PATH, 'mincex_diff-B0.mnc'), - shape = (19, 256, 256), - type = np.int16, - affine = _make_affine((z_cos, y_cos, x_cos), - zooms[::-1], - starts[::-1]), - zooms = [abs(v) for v in zooms[::-1]], + fname=os.path.join(MINC2_PATH, 'mincex_diff-B0.mnc'), + shape=(19, 256, 256), + type=np.int16, + affine=_make_affine((z_cos, y_cos, x_cos), + zooms[::-1], + starts[::-1]), + zooms=[abs(v) for v in zooms[::-1]], # These values from mincstats - min = 4.566971917, - max = 3260.121093, - mean = 163.8305553) + min=4.566971917, + max=3260.121093, + mean=163.8305553) class TestFA(TestEPIFrame): example_params = TestB0.example_params.copy() new_params = dict( - fname = os.path.join(MINC2_PATH, 'mincex_diff-FA.mnc'), + fname=os.path.join(MINC2_PATH, 'mincex_diff-FA.mnc'), # These values from mincstats - min = 0.008068881038, - max = 1.224754546, - mean = 0.7520087469) + min=0.008068881038, + max=1.224754546, + mean=0.7520087469) example_params.update(new_params) @@ -118,17 +118,17 @@ class TestGado(TestEPIFrame): zooms = [1, -1, -1] starts = [-75.76775, 115.80462, 81.38605] example_params = dict( - fname = os.path.join(MINC2_PATH, 'mincex_gado-contrast.mnc'), - shape = (100, 170, 146), - type = np.int16, - affine = _make_affine((z_cos, y_cos, x_cos), - zooms[::-1], - starts[::-1]), - zooms = [abs(v) for v in zooms[::-1]], + fname=os.path.join(MINC2_PATH, 'mincex_gado-contrast.mnc'), + shape=(100, 170, 146), + type=np.int16, + affine=_make_affine((z_cos, y_cos, x_cos), + zooms[::-1], + starts[::-1]), + zooms=[abs(v) for v in zooms[::-1]], # These values from mincstats - min = 0, - max = 938668.8698, - mean = 128169.3488) + min=0, + max=938668.8698, + mean=128169.3488) class TestT1(TestEPIFrame): @@ -138,37 +138,37 @@ class TestT1(TestEPIFrame): zooms = [1, 1, 1] starts = [-90, -126, -12] example_params = dict( - fname = os.path.join(MINC2_PATH, 'mincex_t1.mnc'), - shape = (110, 217, 181), - type = np.int16, - affine = _make_affine((z_cos, y_cos, x_cos), - zooms[::-1], - starts[::-1]), - zooms = [abs(v) for v in zooms[::-1]], + fname=os.path.join(MINC2_PATH, 'mincex_t1.mnc'), + shape=(110, 217, 181), + type=np.int16, + affine=_make_affine((z_cos, y_cos, x_cos), + zooms[::-1], + starts[::-1]), + zooms=[abs(v) for v in zooms[::-1]], # These values from mincstats - min = 0, - max = 100, - mean = 23.1659928) + min=0, + max=100, + mean=23.1659928) class TestPD(TestEPIFrame): example_params = TestT1.example_params.copy() new_params = dict( - fname = os.path.join(MINC2_PATH, 'mincex_pd.mnc'), + fname=os.path.join(MINC2_PATH, 'mincex_pd.mnc'), # These values from mincstats - min = 0, - max = 102.5024482, - mean = 23.82625718) + 
min=0, + max=102.5024482, + mean=23.82625718) example_params.update(new_params) class TestMask(TestEPIFrame): example_params = TestT1.example_params.copy() new_params = dict( - fname = os.path.join(MINC2_PATH, 'mincex_mask.mnc'), - type = np.uint8, + fname=os.path.join(MINC2_PATH, 'mincex_mask.mnc'), + type=np.uint8, # These values from mincstats - min = 0, - max = 1, - mean = 0.3817466618) + min=0, + max=1, + mean=0.3817466618) example_params.update(new_params) diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 0fd7e213e6..a505e843b0 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -260,8 +260,8 @@ def test_freesurfer_large_vector_hack(self): dim_type = hdr.template_dtype['dim'].base glmin = hdr.template_dtype['glmin'].base too_big = int(np.iinfo(dim_type).max) + 1 - hdr.set_data_shape((too_big-1, 1, 1)) - assert_equal(hdr.get_data_shape(), (too_big-1, 1, 1)) + hdr.set_data_shape((too_big - 1, 1, 1)) + assert_equal(hdr.get_data_shape(), (too_big - 1, 1, 1)) # The freesurfer case full_shape = (too_big, 1, 1, 1, 1, 1, 1) for dim in range(3, 8): @@ -289,15 +289,15 @@ def test_freesurfer_large_vector_hack(self): # Outside range of glmin raises error far_too_big = int(np.iinfo(glmin).max) + 1 with suppress_warnings(): - hdr.set_data_shape((far_too_big-1, 1, 1)) - assert_equal(hdr.get_data_shape(), (far_too_big-1, 1, 1)) + hdr.set_data_shape((far_too_big - 1, 1, 1)) + assert_equal(hdr.get_data_shape(), (far_too_big - 1, 1, 1)) assert_raises(HeaderDataError, hdr.set_data_shape, (far_too_big, 1, 1)) # glmin of zero raises error (implausible vector length) hdr.set_data_shape((-1, 1, 1)) hdr['glmin'] = 0 assert_raises(HeaderDataError, hdr.get_data_shape) # Lists or tuples or arrays will work for setting shape - for shape in ((too_big-1, 1, 1), (too_big, 1, 1)): + for shape in ((too_big - 1, 1, 1), (too_big, 1, 1)): for constructor in (list, tuple, np.array): with suppress_warnings(): hdr.set_data_shape(constructor(shape)) @@ -433,7 +433,7 @@ def test_quaternion(self): hdr['quatern_d'] = 0 assert_true(np.allclose(hdr.get_qform_quaternion(), [0, 1, 0, 0])) # Check threshold set correctly for float32 - hdr['quatern_b'] = 1+np.finfo(self.quat_dtype).eps + hdr['quatern_b'] = 1 + np.finfo(self.quat_dtype).eps assert_array_almost_equal(hdr.get_qform_quaternion(), [0, 1, 0, 0]) def test_qform(self): @@ -495,7 +495,7 @@ def test_slice_times(self): # values in a predictable way, for the tests below. _stringer = lambda val: val is not None and '%2.1f' % val or None _print_me = lambda s: list(map(_stringer, s)) - #The following examples are from the nifti1.h documentation. + # The following examples are from the nifti1.h documentation. 
hdr['slice_code'] = slice_order_codes['sequential increasing'] assert_equal(_print_me(hdr.get_slice_times()), ['0.0', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6']) @@ -548,7 +548,7 @@ def test_intents(self): ehdr = self.header_class() ehdr.set_intent('t test', (10,), name='some score') assert_equal(ehdr.get_intent(), - ('t test', (10.0,), 'some score')) + ('t test', (10.0,), 'some score')) # invalid intent name assert_raises(KeyError, ehdr.set_intent, 'no intention') # too many parameters diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index 2900a0437e..550d0d414d 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -25,10 +25,13 @@ class Lunk(object): # bare file-like for testing closed = False + def __init__(self, message): - self.message=message + self.message = message + def write(self): pass + def read(self): return self.message @@ -93,6 +96,7 @@ def test_BinOpener(): class TestImageOpener: + def setUp(self): self.compress_ext_map = ImageOpener.compress_ext_map.copy() @@ -151,6 +155,7 @@ def test_compressionlevel(): # bzip2 needs a fairly large file to show differences in compression level many_selves = my_self * 50 # Test we can set default compression at class level + class MyOpener(Opener): default_compresslevel = 5 with InTemporaryDirectory(): @@ -174,6 +179,7 @@ class MyOpener(Opener): def test_compressed_ext_case(): # Test openers usually ignore case for compressed exts contents = b'palindrome of Bolton is notlob' + class StrictOpener(Opener): compress_ext_icase = False exts = ('gz', 'bz2', 'GZ', 'gZ', 'BZ2', 'Bz2') @@ -193,7 +199,7 @@ class StrictOpener(Opener): with StrictOpener(fname, 'rb') as fobj: assert_equal(fobj.read(), contents) lext = ext.lower() - if lext != ext: # extension should not be recognized -> file + if lext != ext: # extension should not be recognized -> file assert_true(isinstance(fobj.fobj, file_class)) elif lext == 'gz': assert_true(isinstance(fobj.fobj, GzipFile)) @@ -223,6 +229,7 @@ def test_set_extensions(): assert_true(hasattr(fobj.fobj, 'compress')) with Opener('test.glrph', 'w') as fobj: assert_false(hasattr(fobj.fobj, 'compress')) + class MyOpener(Opener): compress_ext_map = Opener.compress_ext_map.copy() compress_ext_map['.glrph'] = Opener.gz_def @@ -254,7 +261,7 @@ def test_close_if_mine(): def test_iter(): # Check we can iterate over lines, if the underlying file object allows it lines = \ -"""On the + """On the blue ridged mountains of virginia diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index e2f0c9b2fd..096684befb 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -14,66 +14,66 @@ from numpy.testing import assert_array_equal, assert_array_almost_equal -from ..orientations import (io_orientation, ornt_transform, inv_ornt_aff, - flip_axis, apply_orientation, OrientationError, +from ..orientations import (io_orientation, ornt_transform, inv_ornt_aff, + flip_axis, apply_orientation, OrientationError, ornt2axcodes, axcodes2ornt, aff2axcodes) from ..affines import from_matvec, to_matvec IN_ARRS = [np.eye(4), - [[0,0,1,0], - [0,1,0,0], - [1,0,0,0], - [0,0,0,1]], - [[0,1,0,0], - [0,0,1,0], - [1,0,0,0], - [0,0,0,1]], - [[3,1,0,0], - [1,3,0,0], - [0,0,1,0], - [0,0,0,1]], - [[1,3,0,0], - [3,1,0,0], - [0,0,1,0], - [0,0,0,1]], + [[0, 0, 1, 0], + [0, 1, 0, 0], + [1, 0, 0, 0], + [0, 0, 0, 1]], + [[0, 1, 0, 0], + [0, 0, 1, 0], + [1, 0, 0, 0], + [0, 0, 0, 1]], + [[3, 1, 0, 0], + [1, 3, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]], + 
[[1, 3, 0, 0], + [3, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]], ] -OUT_ORNTS = [[[0,1], - [1,1], - [2,1]], - [[2,1], - [1,1], - [0,1]], - [[2,1], - [0,1], - [1,1]], - [[0,1], - [1,1], - [2,1]], - [[1,1], - [0,1], - [2,1]], - ] - -IN_ARRS = IN_ARRS + [[[np.cos(np.pi/6+i*np.pi/2),np.sin(np.pi/6+i*np.pi/2),0,0], - [-np.sin(np.pi/6+i*np.pi/2),np.cos(np.pi/6+i*np.pi/2),0,0], - [0,0,1,0], - [0,0,0,1]] for i in range(4)] - -OUT_ORNTS = OUT_ORNTS + [[[0,1], - [1,1], - [2,1]], - [[1,-1], - [0,1], - [2,1]], - [[0,-1], - [1,-1], - [2,1]], - [[1,1], - [0,-1], - [2,1]] +OUT_ORNTS = [[[0, 1], + [1, 1], + [2, 1]], + [[2, 1], + [1, 1], + [0, 1]], + [[2, 1], + [0, 1], + [1, 1]], + [[0, 1], + [1, 1], + [2, 1]], + [[1, 1], + [0, 1], + [2, 1]], + ] + +IN_ARRS = IN_ARRS + [[[np.cos(np.pi / 6 + i * np.pi / 2), np.sin(np.pi / 6 + i * np.pi / 2), 0, 0], + [-np.sin(np.pi / 6 + i * np.pi / 2), np.cos(np.pi / 6 + i * np.pi / 2), 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]] for i in range(4)] + +OUT_ORNTS = OUT_ORNTS + [[[0, 1], + [1, 1], + [2, 1]], + [[1, -1], + [0, 1], + [2, 1]], + [[0, -1], + [1, -1], + [2, 1]], + [[1, 1], + [0, -1], + [2, 1]] ] @@ -87,20 +87,20 @@ def same_transform(taff, ornt, shape): # indices from ``arr`` are transformed by (the inverse of) `taff`, # and we index into ``t_arr`` with these transformed points, then we # should get the same values as we would from indexing into arr with - # the untransformed points. + # the untransformed points. shape = np.array(shape) size = np.prod(shape) arr = np.arange(size).reshape(shape) # apply ornt transformations t_arr = apply_orientation(arr, ornt) # get all point indices in arr - i,j,k = shape - arr_pts = np.mgrid[:i,:j,:k].reshape((3,-1)) + i, j, k = shape + arr_pts = np.mgrid[:i, :j, :k].reshape((3, -1)) # inverse of taff takes us from point index in arr to point index in # t_arr itaff = np.linalg.inv(taff) - # apply itaff so that points indexed in t_arr should correspond - o2t_pts = np.dot(itaff[:3,:3], arr_pts) + itaff[:3,3][:,None] + # apply itaff so that points indexed in t_arr should correspond + o2t_pts = np.dot(itaff[:3, :3], arr_pts) + itaff[:3, 3][:, None] assert np.allclose(np.round(o2t_pts), o2t_pts) # fancy index out the t_arr values vals = t_arr[list(o2t_pts.astype('i'))] @@ -110,23 +110,23 @@ def same_transform(taff, ornt, shape): def test_apply(): # most tests are in ``same_transform`` above, via the # test_io_orientations - a = np.arange(24).reshape((2,3,4)) + a = np.arange(24).reshape((2, 3, 4)) # Test 4D with an example orientation ornt = OUT_ORNTS[-1] - t_arr = apply_orientation(a[:,:,:,None], ornt) + t_arr = apply_orientation(a[:, :, :, None], ornt) assert_equal(t_arr.ndim, 4) # Orientation errors assert_raises(OrientationError, apply_orientation, - a[:,:,1], ornt) + a[:, :, 1], ornt) assert_raises(OrientationError, apply_orientation, a, - [[0,1],[np.nan,np.nan],[2,1]]) + [[0, 1], [np.nan, np.nan], [2, 1]]) def test_flip_axis(): - a = np.arange(24).reshape((2,3,4)) + a = np.arange(24).reshape((2, 3, 4)) assert_array_equal( flip_axis(a), np.flipud(a)) @@ -148,7 +148,7 @@ def test_flip_axis(): def test_io_orientation(): - for shape in ((2,3,4), (20, 15, 7)): + for shape in ((2, 3, 4), (20, 15, 7)): for in_arr, out_ornt in zip(IN_ARRS, OUT_ORNTS): ornt = io_orientation(in_arr) assert_array_equal(ornt, out_ornt) @@ -158,7 +158,7 @@ def test_io_orientation(): arr = in_arr.copy() ex_ornt = out_ornt.copy() # flip the input axis in affine - arr[:,axno] *= -1 + arr[:, axno] *= -1 # check that result shows flip ex_ornt[axno, 1] *= -1 ornt = 
io_orientation(arr) @@ -166,8 +166,8 @@ def test_io_orientation(): taff = inv_ornt_aff(ornt, shape) assert_true(same_transform(taff, ornt, shape)) # Test nasty hang for zero columns - rzs = np.c_[np.diag([2, 3, 4, 5]), np.zeros((4,3))] - arr = from_matvec(rzs, [15,16,17,18]) + rzs = np.c_[np.diag([2, 3, 4, 5]), np.zeros((4, 3))] + arr = from_matvec(rzs, [15, 16, 17, 18]) ornt = io_orientation(arr) assert_array_equal(ornt, [[0, 1], [1, 1], @@ -194,7 +194,7 @@ def test_io_orientation(): (eps, False), (eps * 5, False), (eps * 10, True), - ): + ): def_aff[1, 1] = y_val res = pass_tol if has_y else fail_tol assert_array_equal(io_orientation(def_aff), res) @@ -204,102 +204,104 @@ def test_io_orientation(): def_aff[1, 1] = eps * 10 assert_array_equal(io_orientation(def_aff, tol=1e-5), fail_tol) + def test_ornt_transform(): - assert_array_equal(ornt_transform([[0,1], [1,1], [2,-1]], - [[1,1], [0,1], [2,1]]), - [[1,1], [0,1], [2,-1]] - ) - assert_array_equal(ornt_transform([[0,1], [1,1], [2,1]], - [[2,1], [0,-1], [1,1]]), - [[1,-1], [2,1], [0,1]] - ) - #Must have same shape - assert_raises(ValueError, - ornt_transform, - [[0,1], [1,1]], - [[0,1], [1,1], [2, 1]]) - - #Must be (N,2) in shape - assert_raises(ValueError, - ornt_transform, - [[0,1,1], [1,1,1]], - [[0,1,1], [1,1,1]]) - + assert_array_equal(ornt_transform([[0, 1], [1, 1], [2, -1]], + [[1, 1], [0, 1], [2, 1]]), + [[1, 1], [0, 1], [2, -1]] + ) + assert_array_equal(ornt_transform([[0, 1], [1, 1], [2, 1]], + [[2, 1], [0, -1], [1, 1]]), + [[1, -1], [2, 1], [0, 1]] + ) + # Must have same shape + assert_raises(ValueError, + ornt_transform, + [[0, 1], [1, 1]], + [[0, 1], [1, 1], [2, 1]]) + + # Must be (N,2) in shape + assert_raises(ValueError, + ornt_transform, + [[0, 1, 1], [1, 1, 1]], + [[0, 1, 1], [1, 1, 1]]) + def test_ornt2axcodes(): # Recoding orientation to axis codes - labels = (('left', 'right'),('back', 'front'), ('down', 'up')) - assert_equal(ornt2axcodes([[0,1], - [1,1], - [2,1]], labels), ('right', 'front', 'up')) - assert_equal(ornt2axcodes([[0,-1], - [1,-1], - [2,-1]], labels), ('left', 'back', 'down')) - assert_equal(ornt2axcodes([[2,-1], - [1,-1], - [0,-1]], labels), ('down', 'back', 'left')) - assert_equal(ornt2axcodes([[1,1], - [2,-1], - [0,1]], labels), ('front', 'down', 'right')) + labels = (('left', 'right'), ('back', 'front'), ('down', 'up')) + assert_equal(ornt2axcodes([[0, 1], + [1, 1], + [2, 1]], labels), ('right', 'front', 'up')) + assert_equal(ornt2axcodes([[0, -1], + [1, -1], + [2, -1]], labels), ('left', 'back', 'down')) + assert_equal(ornt2axcodes([[2, -1], + [1, -1], + [0, -1]], labels), ('down', 'back', 'left')) + assert_equal(ornt2axcodes([[1, 1], + [2, -1], + [0, 1]], labels), ('front', 'down', 'right')) # default is RAS output directions - assert_equal(ornt2axcodes([[0,1], - [1,1], - [2,1]]), ('R', 'A', 'S')) + assert_equal(ornt2axcodes([[0, 1], + [1, 1], + [2, 1]]), ('R', 'A', 'S')) # dropped axes produce None - assert_equal(ornt2axcodes([[0,1], - [np.nan,np.nan], - [2,1]]), ('R', None, 'S')) + assert_equal(ornt2axcodes([[0, 1], + [np.nan, np.nan], + [2, 1]]), ('R', None, 'S')) # Non integer axes raises error - assert_raises(ValueError, ornt2axcodes, [[0.1,1]]) + assert_raises(ValueError, ornt2axcodes, [[0.1, 1]]) # As do directions not in range - assert_raises(ValueError, ornt2axcodes, [[0,0]]) + assert_raises(ValueError, ornt2axcodes, [[0, 0]]) + def test_axcodes2ornt(): # Go from axcodes back to orientations - labels = (('left', 'right'),('back', 'front'), ('down', 'up')) + labels = (('left', 'right'), 
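For readers skimming the `ornt2axcodes` cases above: orientation arrays and axis-code tuples are inverses of each other under a fixed labels tuple. A minimal round trip, reusing the tests' own labels:

```python
import numpy as np
from nibabel.orientations import ornt2axcodes, axcodes2ornt

labels = (('left', 'right'), ('back', 'front'), ('down', 'up'))
ornt = [[0, 1], [1, 1], [2, 1]]
codes = ornt2axcodes(ornt, labels)             # ('right', 'front', 'up')
assert np.array_equal(axcodes2ornt(codes, labels), ornt)
```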
('back', 'front'), ('down', 'up')) assert_array_equal(axcodes2ornt(('right', 'front', 'up'), labels), - [[0,1], - [1,1], - [2,1]] - ) + [[0, 1], + [1, 1], + [2, 1]] + ) assert_array_equal(axcodes2ornt(('left', 'back', 'down'), labels), - [[0,-1], - [1,-1], - [2,-1]] - ) + [[0, -1], + [1, -1], + [2, -1]] + ) assert_array_equal(axcodes2ornt(('down', 'back', 'left'), labels), - [[2,-1], - [1,-1], - [0,-1]] - ) + [[2, -1], + [1, -1], + [0, -1]] + ) assert_array_equal(axcodes2ornt(('front', 'down', 'right'), labels), - [[1,1], - [2,-1], + [[1, 1], + [2, -1], [0, 1]] ) - + # default is RAS output directions assert_array_equal(axcodes2ornt(('R', 'A', 'S')), - [[0,1], - [1,1], - [2,1]] - ) - - #dropped axes produce None - assert_array_equal(axcodes2ornt(('R', None, 'S')), - [[0,1], - [np.nan,np.nan], - [2,1]] - ) - + [[0, 1], + [1, 1], + [2, 1]] + ) + + # dropped axes produce None + assert_array_equal(axcodes2ornt(('R', None, 'S')), + [[0, 1], + [np.nan, np.nan], + [2, 1]] + ) + def test_aff2axcodes(): - labels = (('left', 'right'),('back', 'front'), ('down', 'up')) + labels = (('left', 'right'), ('back', 'front'), ('down', 'up')) assert_equal(aff2axcodes(np.eye(4)), tuple('RAS')) - aff = [[0,1,0,10],[-1,0,0,20],[0,0,1,30],[0,0,0,1]] - assert_equal(aff2axcodes(aff, (('L','R'),('B','F'),('D','U'))), + aff = [[0, 1, 0, 10], [-1, 0, 0, 20], [0, 0, 1, 30], [0, 0, 0, 1]] + assert_equal(aff2axcodes(aff, (('L', 'R'), ('B', 'F'), ('D', 'U'))), ('B', 'R', 'U')) - assert_equal(aff2axcodes(aff, (('L','R'),('B','F'),('D','U'))), + assert_equal(aff2axcodes(aff, (('L', 'R'), ('B', 'F'), ('D', 'U'))), ('B', 'R', 'U')) diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index 1669fb44fa..e03afafb36 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -48,81 +48,81 @@ VARY_REC = pjoin(DATA_PATH, 'phantom_varscale.REC') # Affine as we determined it mid-2014 AN_OLD_AFFINE = np.array( - [[-3.64994708, 0., 1.83564171, 123.66276611], - [0., -3.75, 0., 115.617 ], - [0.86045705, 0., 7.78655376, -27.91161211], - [0., 0., 0., 1. ]]) + [[-3.64994708, 0., 1.83564171, 123.66276611], + [0., -3.75, 0., 115.617], + [0.86045705, 0., 7.78655376, -27.91161211], + [0., 0., 0., 1.]]) # Affine from Philips-created NIfTI PHILIPS_AFFINE = np.array( - [[ -3.65 , -0.0016, 1.8356, 125.4881], - [ 0.0016, -3.75 , -0.0004, 117.4916], - [ 0.8604, 0.0002, 7.7866, -28.3411], - [ 0. , 0. , 0. , 1. ]]) + [[-3.65, -0.0016, 1.8356, 125.4881], + [0.0016, -3.75, -0.0004, 117.4916], + [0.8604, 0.0002, 7.7866, -28.3411], + [0., 0., 0., 1.]]) # Affines generated by parrec.py from test data in many orientations # Data from http://psydata.ovgu.de/philips_achieva_testfiles/conversion2 -PREVIOUS_AFFINES={ - "Phantom_EPI_3mm_cor_20APtrans_15RLrot_SENSE_15_1" : - npa([[ -3. , 0. , 0. , 118.5 ], - [ 0. , -0.77645714, -3.18755523, 72.82738377], - [ 0. , -2.89777748, 0.85410285, 97.80720486], - [ 0. , 0. , 0. , 1. ]]), - "Phantom_EPI_3mm_cor_SENSE_8_1" : - npa([[ -3. , 0. , 0. , 118.5 ], - [ 0. , 0. , -3.3 , 64.35], - [ 0. , -3. , 0. , 118.5 ], - [ 0. , 0. , 0. , 1. ]]), - "Phantom_EPI_3mm_sag_15AP_SENSE_13_1" : - npa([[ 0. , 0.77645714, 3.18755523, -92.82738377], - [ -3. , 0. , 0. , 118.5 ], - [ 0. , -2.89777748, 0.85410285, 97.80720486], - [ 0. , 0. , 0. , 1. ]]), - "Phantom_EPI_3mm_sag_15FH_SENSE_12_1" : - npa([[ 0.77645714, 0. , 3.18755523, -92.82738377], - [ -2.89777748, 0. , 0.85410285, 97.80720486], - [ 0. , -3. , 0. , 118.5 ], - [ 0. , 0. , 0. , 1. ]]), - "Phantom_EPI_3mm_sag_15RL_SENSE_11_1" : - npa([[ 0. 
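`aff2axcodes` is the one-call composition of `io_orientation` and `ornt2axcodes`; a worked check with the same 90-degree-rotated affine the test uses:

```python
from nibabel.orientations import aff2axcodes

aff = [[0, 1, 0, 10], [-1, 0, 0, 20], [0, 0, 1, 30], [0, 0, 0, 1]]
labels = (('L', 'R'), ('B', 'F'), ('D', 'U'))
# input axis 0 points along -y (back), axis 1 along +x (right), axis 2 up
assert aff2axcodes(aff, labels) == ('B', 'R', 'U')
```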
, 0. , 3.3 , -64.35 ], - [ -2.89777748, -0.77645714, 0. , 145.13226726], - [ 0.77645714, -2.89777748, 0. , 83.79215357], - [ 0. , 0. , 0. , 1. ]]), - "Phantom_EPI_3mm_sag_SENSE_7_1" : - npa([[ 0. , 0. , 3.3 , -64.35], - [ -3. , 0. , 0. , 118.5 ], - [ 0. , -3. , 0. , 118.5 ], - [ 0. , 0. , 0. , 1. ]]), - "Phantom_EPI_3mm_tra_-30AP_10RL_20FH_SENSE_14_1" : - npa([[ 0. , 0. , 3.3 , -74.35], - [ -3. , 0. , 0. , 148.5 ], - [ 0. , -3. , 0. , 138.5 ], - [ 0. , 0. , 0. , 1. ]]), - "Phantom_EPI_3mm_tra_15FH_SENSE_9_1" : - npa([[ 0.77645714, 0. , 3.18755523, -92.82738377], - [ -2.89777748, 0. , 0.85410285, 97.80720486], - [ 0. , -3. , 0. , 118.5 ], - [ 0. , 0. , 0. , 1. ]]), - "Phantom_EPI_3mm_tra_15RL_SENSE_10_1" : - npa([[ 0. , 0. , 3.3 , -64.35 ], - [ -2.89777748, -0.77645714, 0. , 145.13226726], - [ 0.77645714, -2.89777748, 0. , 83.79215357], - [ 0. , 0. , 0. , 1. ]]), - "Phantom_EPI_3mm_tra_SENSE_6_1" : - npa([[ -3. , 0. , 0. , 118.5 ], - [ 0. , -3. , 0. , 118.5 ], - [ 0. , 0. , 3.3 , -64.35], - [ 0. , 0. , 0. , 1. ]]), +PREVIOUS_AFFINES = { + "Phantom_EPI_3mm_cor_20APtrans_15RLrot_SENSE_15_1": + npa([[-3., 0., 0., 118.5], + [0., -0.77645714, -3.18755523, 72.82738377], + [0., -2.89777748, 0.85410285, 97.80720486], + [0., 0., 0., 1.]]), + "Phantom_EPI_3mm_cor_SENSE_8_1": + npa([[-3., 0., 0., 118.5], + [0., 0., -3.3, 64.35], + [0., -3., 0., 118.5], + [0., 0., 0., 1.]]), + "Phantom_EPI_3mm_sag_15AP_SENSE_13_1": + npa([[0., 0.77645714, 3.18755523, -92.82738377], + [-3., 0., 0., 118.5], + [0., -2.89777748, 0.85410285, 97.80720486], + [0., 0., 0., 1.]]), + "Phantom_EPI_3mm_sag_15FH_SENSE_12_1": + npa([[0.77645714, 0., 3.18755523, -92.82738377], + [-2.89777748, 0., 0.85410285, 97.80720486], + [0., -3., 0., 118.5], + [0., 0., 0., 1.]]), + "Phantom_EPI_3mm_sag_15RL_SENSE_11_1": + npa([[0., 0., 3.3, -64.35], + [-2.89777748, -0.77645714, 0., 145.13226726], + [0.77645714, -2.89777748, 0., 83.79215357], + [0., 0., 0., 1.]]), + "Phantom_EPI_3mm_sag_SENSE_7_1": + npa([[0., 0., 3.3, -64.35], + [-3., 0., 0., 118.5], + [0., -3., 0., 118.5], + [0., 0., 0., 1.]]), + "Phantom_EPI_3mm_tra_-30AP_10RL_20FH_SENSE_14_1": + npa([[0., 0., 3.3, -74.35], + [-3., 0., 0., 148.5], + [0., -3., 0., 138.5], + [0., 0., 0., 1.]]), + "Phantom_EPI_3mm_tra_15FH_SENSE_9_1": + npa([[0.77645714, 0., 3.18755523, -92.82738377], + [-2.89777748, 0., 0.85410285, 97.80720486], + [0., -3., 0., 118.5], + [0., 0., 0., 1.]]), + "Phantom_EPI_3mm_tra_15RL_SENSE_10_1": + npa([[0., 0., 3.3, -64.35], + [-2.89777748, -0.77645714, 0., 145.13226726], + [0.77645714, -2.89777748, 0., 83.79215357], + [0., 0., 0., 1.]]), + "Phantom_EPI_3mm_tra_SENSE_6_1": + npa([[-3., 0., 0., 118.5], + [0., -3., 0., 118.5], + [0., 0., 3.3, -64.35], + [0., 0., 0., 1.]]), } # Original values for b values in DTI.PAR, still in PSL orientation -DTI_PAR_BVECS = np.array([[-0.667, -0.667, -0.333], - [-0.333, 0.667, -0.667], - [-0.667, 0.333, 0.667], - [-0.707, -0.000, -0.707], - [-0.707, 0.707, 0.000], - [-0.000, 0.707, 0.707], - [ 0.000, 0.000, 0.000], - [ 0.000, 0.000, 0.000]]) +DTI_PAR_BVECS = np.array([[-0.667, -0.667, -0.333], + [-0.333, 0.667, -0.667], + [-0.667, 0.333, 0.667], + [-0.707, -0.000, -0.707], + [-0.707, 0.707, 0.000], + [-0.000, 0.707, 0.707], + [0.000, 0.000, 0.000], + [0.000, 0.000, 0.000]]) # DTI.PAR values for bvecs DTI_PAR_BVALS = [1000] * 6 + [0, 1000] @@ -132,18 +132,18 @@ # Loaded image was ``phantom_EPI_asc_CLEAR_2_1.nii`` from # http://psydata.ovgu.de/philips_achieva_testfiles/conversion dict( - fname = EG_PAR, - shape = (64, 64, 9, 3), - dtype = 
np.uint16, + fname=EG_PAR, + shape=(64, 64, 9, 3), + dtype=np.uint16, # We disagree with Philips about the right affine, for the moment, so # use our own affine as determined from a previous load in nibabel - affine = AN_OLD_AFFINE, - zooms = (3.75, 3.75, 8.0, 2.0), - data_summary = dict( - min = 0.0, - max = 2299.4110643863678, - mean = 194.95876256117265), - is_proxy = True) + affine=AN_OLD_AFFINE, + zooms=(3.75, 3.75, 8.0, 2.0), + data_summary=dict( + min=0.0, + max=2299.4110643863678, + mean=194.95876256117265), + is_proxy=True) ] @@ -251,7 +251,7 @@ def test_get_sorted_slice_indices(): # Reverse - volume order preserved hdr = PARRECHeader(HDR_INFO, HDR_DEFS[::-1]) assert_array_equal(hdr.get_sorted_slice_indices(), - [8, 7, 6, 5, 4, 3, 2, 1, 0, + [8, 7, 6, 5, 4, 3, 2, 1, 0, 17, 16, 15, 14, 13, 12, 11, 10, 9, 26, 25, 24, 23, 22, 21, 20, 19, 18]) # Omit last slice, only two volumes @@ -263,7 +263,7 @@ def test_get_sorted_slice_indices(): def test_vol_number(): # Test algorithm for calculating volume number assert_array_equal(vol_numbers([1, 3, 0]), [0, 0, 0]) - assert_array_equal(vol_numbers([1, 3, 0, 0]), [ 0, 0, 0, 1]) + assert_array_equal(vol_numbers([1, 3, 0, 0]), [0, 0, 0, 1]) assert_array_equal(vol_numbers([1, 3, 0, 0, 0]), [0, 0, 0, 1, 2]) assert_array_equal(vol_numbers([1, 3, 0, 0, 4]), [0, 0, 0, 1, 0]) assert_array_equal(vol_numbers([1, 3, 0, 3, 1, 0]), @@ -314,7 +314,7 @@ def test_vol_calculations(): assert_equal(set(slice_nos), set(range(1, max_slice + 1))) assert_array_equal(vol_is_full(slice_nos, max_slice), True) if par.endswith('NA.PAR'): - continue # Cannot parse this one + continue # Cannot parse this one # Load truncated without warnings with suppress_warnings(): hdr = PARRECHeader(gen_info, slice_info, True) @@ -478,15 +478,15 @@ def test_image_creation(): hdr = PARRECHeader(HDR_INFO, HDR_DEFS) arr_prox_dv = np.array(PARRECArrayProxy(EG_REC, hdr, scaling='dv')) arr_prox_fp = np.array(PARRECArrayProxy(EG_REC, hdr, scaling='fp')) - good_map = dict(image = FileHolder(EG_REC), - header = FileHolder(EG_PAR)) - trunc_map = dict(image = FileHolder(TRUNC_REC), - header = FileHolder(TRUNC_PAR)) + good_map = dict(image=FileHolder(EG_REC), + header=FileHolder(EG_PAR)) + trunc_map = dict(image=FileHolder(TRUNC_REC), + header=FileHolder(TRUNC_PAR)) for func, good_param, trunc_param in ( - (PARRECImage.from_filename, EG_PAR, TRUNC_PAR), - (PARRECImage.load, EG_PAR, TRUNC_PAR), - (parrec.load, EG_PAR, TRUNC_PAR), - (PARRECImage.from_file_map, good_map, trunc_map)): + (PARRECImage.from_filename, EG_PAR, TRUNC_PAR), + (PARRECImage.load, EG_PAR, TRUNC_PAR), + (parrec.load, EG_PAR, TRUNC_PAR), + (PARRECImage.from_file_map, good_map, trunc_map)): img = func(good_param) assert_array_equal(img.dataobj, arr_prox_dv) # permit_truncated is keyword only @@ -516,6 +516,7 @@ def test_image_creation(): class FakeHeader(object): """ Minimal API of header for PARRECArrayProxy """ + def __init__(self, shape, dtype): self._shape = shape self._dtype = np.dtype(dtype) diff --git a/nibabel/tests/test_parrec_data.py b/nibabel/tests/test_parrec_data.py index 5f86eb2b0a..16de836206 100644 --- a/nibabel/tests/test_parrec_data.py +++ b/nibabel/tests/test_parrec_data.py @@ -30,7 +30,7 @@ def test_loading(): par_root, ext = splitext(basename(par)) # NA.PAR appears to be a localizer, with three slices in each of the # three orientations: sagittal; coronal, transverse - if par_root == 'NA': + if par_root == 'NA': continue # Check we can load the image pimg = load(par) diff --git a/nibabel/tests/test_proxy_api.py 
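The `vol_numbers` cases above pin down a simple rule: a slice's volume index is how many times its slice number has already been seen. A pure-Python restatement (the name `vol_numbers_sketch` below is illustration only; the real function lives in `nibabel.parrec`):

```python
from collections import defaultdict

def vol_numbers_sketch(slice_nos):
    # volume index of each slice = prior occurrences of its slice number
    seen = defaultdict(int)
    vol_nos = []
    for s in slice_nos:
        vol_nos.append(seen[s])
        seen[s] += 1
    return vol_nos

assert vol_numbers_sketch([1, 3, 0, 0]) == [0, 0, 0, 1]
assert vol_numbers_sketch([1, 3, 0, 0, 4]) == [0, 0, 0, 1, 0]
```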
b/nibabel/tests/test_proxy_api.py index 9e2cfb2f4b..7825f7faff 100644 --- a/nibabel/tests/test_proxy_api.py +++ b/nibabel/tests/test_proxy_api.py @@ -73,7 +73,7 @@ def _some_slicers(shape): for i in range(ndim): if i % 2: slicers[i, i] = -1 - elif shape[i] < 2: # some proxy examples have length 1 axes + elif shape[i] < 2: # some proxy examples have length 1 axes slicers[i, i] = 0 # Add a newaxis to keep us on our toes no_pos = ndim // 2 @@ -147,7 +147,7 @@ def validate_fileobj_isolated(self, pmaker, params): if isinstance(fio, string_types): return assert_array_equal(prox, params['arr_out']) - fio.read() # move to end of file + fio.read() # move to end of file assert_array_equal(prox, params['arr_out']) def validate_proxy_slicing(self, pmaker, params): @@ -210,10 +210,10 @@ def obj_params(self): hdr.set_data_shape(shape) if self.settable_offset: hdr.set_data_offset(offset) - if (slope, inter) == (1, 0): # No scaling applied + if (slope, inter) == (1, 0): # No scaling applied # dtype from array dtype_out = dtype - else: # scaling or offset applied + else: # scaling or offset applied # out dtype predictable from apply_read_scaling # and datatypes of slope, inter hdr.set_slope_inter(slope, inter) @@ -222,6 +222,7 @@ def obj_params(self): 1. if s is None else s, 0. if i is None else i) dtype_out = tmp.dtype.type + def sio_func(): fio = BytesIO() fio.truncate(0) @@ -234,18 +235,19 @@ def sio_func(): fio, new_hdr) params = dict( - dtype = dtype, - dtype_out = dtype_out, - arr = arr.copy(), - arr_out = arr * slope + inter, - shape = shape, - offset = offset, - slope = slope, - inter = inter) + dtype=dtype, + dtype_out=dtype_out, + arr=arr.copy(), + arr_out=arr * slope + inter, + shape=shape, + offset=offset, + slope=slope, + inter=inter) yield sio_func, params # Same with filenames with InTemporaryDirectory(): fname = 'data.bin' + def fname_func(): with open(fname, 'wb') as fio: fio.seek(offset) @@ -296,7 +298,7 @@ class TestNifti1ProxyAPI(TestSpm99AnalyzeProxyAPI): class TestMGHAPI(TestAnalyzeProxyAPI): header_class = MGHHeader - shapes = ((2, 3, 4), (2, 3, 4, 5)) # MGH can only do >= 3D + shapes = ((2, 3, 4), (2, 3, 4, 5)) # MGH can only do >= 3D has_slope = False has_inter = False settable_offset = False @@ -308,6 +310,7 @@ class TestMinc1API(_TestProxyAPI): file_class = minc1.Minc1File eg_fname = 'tiny.mnc' eg_shape = (10, 20, 20) + @staticmethod def opener(f): return netcdf_file(f, mode='r') @@ -327,6 +330,7 @@ def obj_params(self): eg_path = pjoin(DATA_PATH, self.eg_fname) arr_out = self.file_class( self.opener(eg_path)).get_scaled_data() + def eg_func(): mf = self.file_class(self.opener(eg_path)) prox = minc1.MincImageArrayProxy(mf) @@ -345,6 +349,7 @@ class TestMinc2API(TestMinc1API): file_class = minc2.Minc2File eg_fname = 'small.mnc' eg_shape = (18, 28, 29) + @staticmethod def opener(f): return h5py.File(f, mode='r') @@ -358,6 +363,7 @@ def obj_params(self): eg_path = pjoin(DATA_PATH, self.eg_fname) img = ecat.load(eg_path) arr_out = img.get_data() + def eg_func(): img = ecat.load(eg_path) sh = img.get_subheaders() @@ -378,6 +384,7 @@ class TestPARRECAPI(_TestProxyAPI): def _func_dict(self, rec_name): img = parrec.load(rec_name) arr_out = img.get_data() + def eg_func(): img = parrec.load(rec_name) prox = parrec.PARRECArrayProxy(rec_name, @@ -386,7 +393,7 @@ def eg_func(): fobj = open(rec_name, 'rb') return prox, fobj, img.header return (eg_func, - dict(shape = img.shape, + dict(shape=img.shape, dtype_out=np.float64, arr_out=arr_out)) diff --git a/nibabel/tests/test_quaternions.py 
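`obj_params` above predicts the proxy's output dtype from numpy's promotion rules for `arr * slope + inter`; the same behaviour in miniature, plain numpy only (note the exact promotion depends on the numpy version's casting rules):

```python
import numpy as np

arr = np.arange(6, dtype=np.int16)
scaled = arr * np.float32(2.0) + np.float32(10.0)
# int16 data scaled by float32 slope/inter comes out float32; float64
# scale factors would promote to float64 instead
assert scaled.dtype == np.float32
```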
b/nibabel/tests/test_quaternions.py index 71e3a06d4c..ba62790066 100644 --- a/nibabel/tests/test_quaternions.py +++ b/nibabel/tests/test_quaternions.py @@ -29,14 +29,14 @@ def slow(t): # Example rotations ''' eg_rots = [] -params = (-pi,pi,pi/2) +params = (-pi, pi, pi / 2) zs = np.arange(*params) ys = np.arange(*params) xs = np.arange(*params) for z in zs: for y in ys: for x in xs: - eg_rots.append(nea.euler2mat(z,y,x)) + eg_rots.append(nea.euler2mat(z, y, x)) # Example quaternions (from rotations) eg_quats = [] for M in eg_rots: @@ -46,7 +46,7 @@ def slow(t): # Set of arbitrary unit quaternions unit_quats = set() -params = range(-2,3) +params = range(-2, 3) for w in params: for x in params: for y in params: @@ -61,19 +61,19 @@ def slow(t): def test_fillpos(): # Takes np array xyz = np.zeros((3,)) - w,x,y,z = nq.fillpositive(xyz) + w, x, y, z = nq.fillpositive(xyz) yield assert_true, w == 1 # Or lists xyz = [0] * 3 - w,x,y,z = nq.fillpositive(xyz) + w, x, y, z = nq.fillpositive(xyz) yield assert_true, w == 1 # Errors with wrong number of values yield assert_raises, ValueError, nq.fillpositive, [0, 0] - yield assert_raises, ValueError, nq.fillpositive, [0]*4 + yield assert_raises, ValueError, nq.fillpositive, [0] * 4 # Errors with negative w2 - yield assert_raises, ValueError, nq.fillpositive, [1.0]*3 + yield assert_raises, ValueError, nq.fillpositive, [1.0] * 3 # Test corner case where w is near zero - wxyz = nq.fillpositive([1,0,0]) + wxyz = nq.fillpositive([1, 0, 0]) yield assert_true, wxyz[0] == 0.0 @@ -96,7 +96,7 @@ def test_quat2mat(): yield assert_array_almost_equal, M, np.diag([1, -1, -1]) M = nq.quat2mat([0, 0, 0, 0]) yield assert_array_almost_equal, M, np.eye(3) - + def test_inverse(): # Takes sequence @@ -113,7 +113,7 @@ def test_inverse(): def test_eye(): qi = nq.eye() yield assert_true, qi.dtype.kind == 'f' - yield assert_true, np.all([1,0,0,0]==qi) + yield assert_true, np.all([1, 0, 0, 0] == qi) yield assert_true, np.allclose(nq.quat2mat(qi), np.eye(3)) @@ -127,11 +127,11 @@ def test_norm(): @slow def test_mult(): - # Test that quaternion * same as matrix * + # Test that quaternion * same as matrix * for M1, q1 in eg_pairs[0::4]: for M2, q2 in eg_pairs[1::4]: q21 = nq.mult(q2, q1) - yield assert_array_almost_equal, np.dot(M2,M1), nq.quat2mat(q21) + yield assert_array_almost_equal, np.dot(M2, M1), nq.quat2mat(q21) def test_inverse(): @@ -144,7 +144,7 @@ def test_inverse(): def test_eye(): qi = nq.eye() - yield assert_true, np.all([1,0,0,0]==qi) + yield assert_true, np.all([1, 0, 0, 0] == qi) yield assert_true, np.allclose(nq.quat2mat(qi), np.eye(3)) diff --git a/nibabel/tests/test_recoder.py b/nibabel/tests/test_recoder.py index 0db2e3e376..e340936ff0 100644 --- a/nibabel/tests/test_recoder.py +++ b/nibabel/tests/test_recoder.py @@ -14,9 +14,10 @@ from nose.tools import assert_equal, assert_raises, assert_true, assert_false + def test_recoder(): # simplest case, no aliases - codes = ((1,),(2,)) + codes = ((1,), (2,)) rc = Recoder(codes) yield assert_equal, rc.code[1], 1 yield assert_equal, rc.code[2], 2 @@ -27,8 +28,8 @@ def test_recoder(): yield assert_equal, rc.code1[1], 1 yield assert_equal, rc.code1[2], 2 # code and label - codes = ((1,'one'), (2,'two')) - rc = Recoder(codes) # just with implicit alias + codes = ((1, 'one'), (2, 'two')) + rc = Recoder(codes) # just with implicit alias yield assert_equal, rc.code[1], 1 yield assert_equal, rc.code[2], 2 yield assert_raises, KeyError, rc.code.__getitem__, 3 @@ -36,19 +37,19 @@ def test_recoder(): yield assert_equal, 
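`test_mult` above states the key homomorphism property; here it is for one concrete pair, using only the `nq` functions the test module already calls:

```python
import numpy as np
import nibabel.quaternions as nq

q1 = nq.fillpositive([0.1, 0.2, 0.3])     # w chosen so the quaternion is unit
q2 = nq.fillpositive([-0.2, 0.1, 0.05])
# composing rotations by quaternion product matches the matrix product
assert np.allclose(np.dot(nq.quat2mat(q2), nq.quat2mat(q1)),
                   nq.quat2mat(nq.mult(q2, q1)))
```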
rc.code['two'], 2 yield assert_raises, KeyError, rc.code.__getitem__, 'three' yield assert_raises, AttributeError, rc.__getattribute__, 'label' - rc = Recoder(codes, ['code1', 'label']) # with explicit column names + rc = Recoder(codes, ['code1', 'label']) # with explicit column names yield assert_raises, AttributeError, rc.__getattribute__, 'code' yield assert_equal, rc.code1[1], 1 yield assert_equal, rc.code1['one'], 1 yield assert_equal, rc.label[1], 'one' yield assert_equal, rc.label['one'], 'one' # code, label, aliases - codes = ((1,'one','1','first'), (2,'two')) - rc = Recoder(codes) # just with implicit alias + codes = ((1, 'one', '1', 'first'), (2, 'two')) + rc = Recoder(codes) # just with implicit alias yield assert_equal, rc.code[1], 1 yield assert_equal, rc.code['one'], 1 yield assert_equal, rc.code['first'], 1 - rc = Recoder(codes, ['code1', 'label']) # with explicit column names + rc = Recoder(codes, ['code1', 'label']) # with explicit column names yield assert_equal, rc.code1[1], 1 yield assert_equal, rc.code1['first'], 1 yield assert_equal, rc.label[1], 'one' @@ -60,20 +61,25 @@ def test_recoder(): def test_custom_dicter(): # Allow custom dict-like object in constructor class MyDict(object): + def __init__(self): self._keys = [] + def __setitem__(self, key, value): self._keys.append(key) + def __getitem__(self, key): if key in self._keys: return 'spam' return 'eggs' + def keys(self): return ['some', 'keys'] + def values(self): return ['funny', 'list'] # code, label, aliases - codes = ((1,'one','1','first'), (2,'two')) + codes = ((1, 'one', '1', 'first'), (2, 'two')) rc = Recoder(codes, map_maker=MyDict) yield assert_equal, rc.code[1], 'spam' yield assert_equal, rc.code['one'], 'spam' @@ -84,7 +90,7 @@ def values(self): def test_add_codes(): - codes = ((1,'one','1','first'), (2,'two')) + codes = ((1, 'one', '1', 'first'), (2, 'two')) rc = Recoder(codes) yield assert_equal, rc.code['two'], 2 yield assert_raises, KeyError, rc.code.__getitem__, 'three' @@ -95,7 +101,7 @@ def test_add_codes(): def test_sugar(): # Syntactic sugar for recoder class - codes = ((1,'one','1','first'), (2,'two')) + codes = ((1, 'one', '1', 'first'), (2, 'two')) rc = Recoder(codes) # Field1 is synonym for first named dict yield assert_equal, rc.code, rc.field1 @@ -105,7 +111,7 @@ def test_sugar(): yield assert_equal, rc[1], rc.field1[1] yield assert_equal, rc['two'], rc.field1['two'] # keys gets all keys - yield assert_equal, set(rc.keys()), set((1,'one','1','first',2,'two')) + yield assert_equal, set(rc.keys()), set((1, 'one', '1', 'first', 2, 'two')) # value_set gets set of values from first column yield assert_equal, rc.value_set(), set((1, 2)) # or named column if given @@ -151,5 +157,3 @@ def test_dtmapper(): assert_equal(d[sw_dt], 'spam') sw_intp_dt = intp_dt.newbyteorder(swapped_code) assert_equal(d[sw_intp_dt], 'spam') - - diff --git a/nibabel/tests/test_round_trip.py b/nibabel/tests/test_round_trip.py index fc1a76df0b..0b23bf192e 100644 --- a/nibabel/tests/test_round_trip.py +++ b/nibabel/tests/test_round_trip.py @@ -17,6 +17,7 @@ DEBUG = True + def round_trip(arr, out_dtype): img = Nifti1Image(arr, np.eye(4)) img.file_map['image'].fileobj = BytesIO() @@ -89,6 +90,7 @@ def test_big_bad_ulp(): BIG_FLOAT = np.float64 + def test_round_trip(): scaling_type = np.float32 rng = np.random.RandomState(20111121) @@ -102,7 +104,7 @@ def test_round_trip(): # Expanding standard deviations for i, sd_10 in enumerate(sd_10s): sd = 10.0**sd_10 - V_in = rng.normal(0, sd, size=(N,1)) + V_in = rng.normal(0, sd, 
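For orientation: `Recoder` builds cross-referencing dicts from rows of (code, label, aliases...). A minimal sketch, assuming its usual home in `nibabel.volumeutils`:

```python
from nibabel.volumeutils import Recoder

codes = ((1, 'one', 'first'), (2, 'two'))
rc = Recoder(codes)            # first column is the code, rest are aliases
assert rc.code['one'] == 1
assert rc.code['first'] == 1
assert rc.code[2] == 2         # codes also map to themselves
```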
size=(N, 1)) for j, in_type in enumerate(f_types): for k, out_type in enumerate(iuint_types): check_arr(sd_10, V_in, in_type, out_type, scaling_type) @@ -115,7 +117,7 @@ def test_round_trip(): center = type_range / 2.0 + mn # float(sd) because type_range can be type 'long' width = type_range * float(sd) - V_in = rng.normal(center, width, size=(N,1)) + V_in = rng.normal(center, width, size=(N, 1)) for k, out_type in enumerate(iuint_types): check_arr(sd, V_in, in_type, out_type, scaling_type) @@ -125,7 +127,7 @@ def check_arr(test_id, V_in, in_type, out_type, scaling_type): if arr_dash is None: # Scaling causes a header or writer error return - nzs = arr != 0 # avoid divide by zero error + nzs = arr != 0 # avoid divide by zero error if not np.any(nzs): if DEBUG: raise ValueError('Array all zero') @@ -137,7 +139,7 @@ def check_arr(test_id, V_in, in_type, out_type, scaling_type): return rel_err = np.abs(top / arr) abs_err = np.abs(top) - if slope == 1: # integers output, offset only scaling + if slope == 1: # integers output, offset only scaling if set((in_type, out_type)) == set((np.int64, np.uint64)): # Scaling to or from 64 bit ints can go outside range of continuous # integers for float64 and thus lose precision; take this into @@ -146,7 +148,7 @@ def check_arr(test_id, V_in, in_type, out_type, scaling_type): Ai = A - inter ulps = [big_bad_ulp(A), big_bad_ulp(Ai)] exp_abs_err = np.max(ulps, axis=0) - else: # floats can give full precision - no error! + else: # floats can give full precision - no error! exp_abs_err = np.zeros_like(abs_err) rel_thresh = 0 else: diff --git a/nibabel/tests/test_rstutils.py b/nibabel/tests/test_rstutils.py index f3a454303b..9fd708ba64 100644 --- a/nibabel/tests/test_rstutils.py +++ b/nibabel/tests/test_rstutils.py @@ -10,6 +10,7 @@ from nose import SkipTest from nose.tools import assert_equal, assert_raises + def test_rst_table(): # Tests for printable table function R, C = 3, 4 @@ -18,42 +19,42 @@ def test_rst_table(): raise SkipTest("Known (later fixed) bug in python3.2/numpy " "treating np.int64 as str") assert_equal(rst_table(cell_values), -"""+--------+--------+--------+--------+--------+ + """+--------+--------+--------+--------+--------+ | | col[0] | col[1] | col[2] | col[3] | +========+========+========+========+========+ | row[0] | 0.00 | 1.00 | 2.00 | 3.00 | | row[1] | 4.00 | 5.00 | 6.00 | 7.00 | | row[2] | 8.00 | 9.00 | 10.00 | 11.00 | +--------+--------+--------+--------+--------+""" - ) + ) assert_equal(rst_table(cell_values, ['a', 'b', 'c']), -"""+---+--------+--------+--------+--------+ + """+---+--------+--------+--------+--------+ | | col[0] | col[1] | col[2] | col[3] | +===+========+========+========+========+ | a | 0.00 | 1.00 | 2.00 | 3.00 | | b | 4.00 | 5.00 | 6.00 | 7.00 | | c | 8.00 | 9.00 | 10.00 | 11.00 | +---+--------+--------+--------+--------+""" - ) + ) assert_raises(ValueError, rst_table, cell_values, ['a', 'b']) assert_raises(ValueError, rst_table, cell_values, ['a', 'b', 'c', 'd']) assert_equal(rst_table(cell_values, None, ['1', '2', '3', '4']), -"""+--------+-------+-------+-------+-------+ + """+--------+-------+-------+-------+-------+ | | 1 | 2 | 3 | 4 | +========+=======+=======+=======+=======+ | row[0] | 0.00 | 1.00 | 2.00 | 3.00 | | row[1] | 4.00 | 5.00 | 6.00 | 7.00 | | row[2] | 8.00 | 9.00 | 10.00 | 11.00 | +--------+-------+-------+-------+-------+""" - ) + ) assert_raises(ValueError, rst_table, cell_values, None, ['1', '2', '3']) assert_raises(ValueError, rst_table, cell_values, None, list('12345')) 
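`check_arr` above budgets round-trip error in ulps via `big_bad_ulp`; the underlying fact, as a two-line plain-numpy sketch:

```python
import numpy as np

a = np.float32(1e7)
one_ulp = np.spacing(a)        # gap to the next representable float32
# perturbations well under one ulp disappear when stored at this magnitude
assert np.float32(a + one_ulp / 3) == a
```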
assert_equal(rst_table(cell_values, title='A title'), -"""******* + """******* A title ******* @@ -64,21 +65,21 @@ def test_rst_table(): | row[1] | 4.00 | 5.00 | 6.00 | 7.00 | | row[2] | 8.00 | 9.00 | 10.00 | 11.00 | +--------+--------+--------+--------+--------+""" - ) - assert_equal(rst_table(cell_values, val_fmt = '{0}'), -"""+--------+--------+--------+--------+--------+ + ) + assert_equal(rst_table(cell_values, val_fmt='{0}'), + """+--------+--------+--------+--------+--------+ | | col[0] | col[1] | col[2] | col[3] | +========+========+========+========+========+ | row[0] | 0 | 1 | 2 | 3 | | row[1] | 4 | 5 | 6 | 7 | | row[2] | 8 | 9 | 10 | 11 | +--------+--------+--------+--------+--------+""" - ) + ) # Doing a fancy cell format cell_values_back = np.arange(R * C)[::-1].reshape((R, C)) cell_3d = np.dstack((cell_values, cell_values_back)) - assert_equal(rst_table(cell_3d, val_fmt = '{0[0]}-{0[1]}'), -"""+--------+--------+--------+--------+--------+ + assert_equal(rst_table(cell_3d, val_fmt='{0[0]}-{0[1]}'), + """+--------+--------+--------+--------+--------+ | | col[0] | col[1] | col[2] | col[3] | +========+========+========+========+========+ | row[0] | 0-11 | 1-10 | 2-9 | 3-8 | @@ -94,7 +95,7 @@ def test_rst_table(): cross='%', title_heading='#') assert_equal(rst_table(cell_values, title='A title', format_chars=formats), -"""####### + """####### A title ####### @@ -105,7 +106,7 @@ def test_rst_table(): ! row[1] ! 4.00 ! 5.00 ! 6.00 ! 7.00 ! ! row[2] ! 8.00 ! 9.00 ! 10.00 ! 11.00 ! %________%________%________%________%________%""" - ) + ) formats['funny_value'] = '!' assert_raises(ValueError, rst_table, diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py index dfc0359dff..0d7395fb88 100644 --- a/nibabel/tests/test_scaling.py +++ b/nibabel/tests/test_scaling.py @@ -26,6 +26,7 @@ # Debug print statements DEBUG = True + def test_scale_min_max(): mx_dt = np.maximum_sctype(np.float) for tp in np.sctypes['uint'] + np.sctypes['int']: @@ -45,15 +46,15 @@ def test_scale_min_max(): for mn, mx in value_pairs: # with intercept scale, inter = scale_min_max(mn, mx, tp, True) - if mx-mn: - assert_array_almost_equal, (mx-inter) / scale, imax - assert_array_almost_equal, (mn-inter) / scale, imin + if mx - mn: + assert_array_almost_equal, (mx - inter) / scale, imax + assert_array_almost_equal, (mn - inter) / scale, imin else: assert_equal, (scale, inter), (1.0, mn) # without intercept if imin == 0 and mn < 0 and mx > 0: (assert_raises, ValueError, - scale_min_max, mn, mx, tp, False) + scale_min_max, mn, mx, tp, False) continue scale, inter = scale_min_max(mn, mx, tp, False) assert_equal, inter, 0.0 @@ -65,9 +66,9 @@ def test_scale_min_max(): assert_true, sc_mn >= imin assert_true, sc_mx <= imax if imin == 0: - if mx > 0: # numbers all +ve + if mx > 0: # numbers all +ve assert_array_almost_equal, mx / scale, imax - else: # numbers all -ve + else: # numbers all -ve assert_array_almost_equal, mn / scale, imax continue if abs(mx) >= abs(mn): @@ -79,28 +80,28 @@ def test_scale_min_max(): def test_finite_range(): # Finite range utility function for in_arr, res in ( - ([[-1, 0, 1],[np.inf, np.nan, -np.inf]], (-1, 1)), - (np.array([[-1, 0, 1],[np.inf, np.nan, -np.inf]]), (-1, 1)), - ([[np.nan],[np.nan]], (np.inf, -np.inf)), # all nans slices + ([[-1, 0, 1], [np.inf, np.nan, -np.inf]], (-1, 1)), + (np.array([[-1, 0, 1], [np.inf, np.nan, -np.inf]]), (-1, 1)), + ([[np.nan], [np.nan]], (np.inf, -np.inf)), # all nans slices (np.zeros((3, 4, 5)) + np.nan, (np.inf, -np.inf)), - 
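A quick way to see what `rst_table` produces without reading through the expected strings, assuming the import path implied by the test module's name:

```python
import numpy as np
from nibabel.rstutils import rst_table  # assumed location

print(rst_table(np.arange(4.).reshape((2, 2)),
                ['r0', 'r1'], ['c0', 'c1'], title='demo'))
```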
([[-np.inf],[np.inf]], (np.inf, -np.inf)), # all infs slices + ([[-np.inf], [np.inf]], (np.inf, -np.inf)), # all infs slices (np.zeros((3, 4, 5)) + np.inf, (np.inf, -np.inf)), ([[np.nan, -1, 2], [-2, np.nan, 1]], (-2, 2)), ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), - ([[-np.inf, 2], [np.nan, 1]], (1, 2)), # good max case + ([[-np.inf, 2], [np.nan, 1]], (1, 2)), # good max case ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), ([np.nan], (np.inf, -np.inf)), ([np.inf], (np.inf, -np.inf)), ([-np.inf], (np.inf, -np.inf)), - ([np.inf, 1], (1, 1)), # only look at finite values + ([np.inf, 1], (1, 1)), # only look at finite values ([-np.inf, 1], (1, 1)), - ([[],[]], (np.inf, -np.inf)), # empty array + ([[], []], (np.inf, -np.inf)), # empty array (np.array([[-3, 0, 1], [2, -1, 4]], dtype=np.int), (-3, 4)), (np.array([[1, 0, 1], [2, 3, 4]], dtype=np.uint), (0, 4)), - ([0., 1, 2, 3], (0,3)), + ([0., 1, 2, 3], (0, 3)), # Complex comparison works as if they are floats - ([[np.nan, -1-100j, 2], [-2, np.nan, 1+100j]], (-2, 2)), - ([[np.nan, -1, 2-100j], [-2+100j, np.nan, 1]], (-2+100j, 2-100j)), + ([[np.nan, -1 - 100j, 2], [-2, np.nan, 1 + 100j]], (-2, 2)), + ([[np.nan, -1, 2 - 100j], [-2 + 100j, np.nan, 1]], (-2 + 100j, 2 - 100j)), ): assert_equal(finite_range(in_arr), res) assert_equal(finite_range(in_arr, False), res) @@ -147,7 +148,7 @@ def test_a2f_mn_mx(): str_io = BytesIO() for out_type in (np.int16, np.float32): arr = np.arange(6, dtype=out_type) - arr_orig = arr.copy() # safe backup for testing against + arr_orig = arr.copy() # safe backup for testing against # Basic round trip to warm up array_to_file(arr, str_io) data_back = array_from_file(arr.shape, out_type, str_io) @@ -158,21 +159,21 @@ def test_a2f_mn_mx(): # arr unchanged assert_array_equal(arr, arr_orig) # returned value clipped low - assert_array_equal(data_back, [2,2,2,3,4,5]) + assert_array_equal(data_back, [2, 2, 2, 3, 4, 5]) # Clip high array_to_file(arr, str_io, mx=4) data_back = array_from_file(arr.shape, out_type, str_io) # arr unchanged assert_array_equal(arr, arr_orig) # returned value clipped high - assert_array_equal(data_back, [0,1,2,3,4,4]) + assert_array_equal(data_back, [0, 1, 2, 3, 4, 4]) # Clip both array_to_file(arr, str_io, mn=2, mx=4) data_back = array_from_file(arr.shape, out_type, str_io) # arr unchanged assert_array_equal(arr, arr_orig) # returned value clipped high - assert_array_equal(data_back, [2,2,2,3,4,4]) + assert_array_equal(data_back, [2, 2, 2, 3, 4, 4]) def test_a2f_nan2zero(): @@ -232,7 +233,7 @@ def test_scaling_in_abstract(): # for any simple way of doing the calculation, the result is near enough for category0, category1 in (('int', 'int'), ('uint', 'int'), - ): + ): for in_type in np.sctypes[category0]: for out_type in np.sctypes[category1]: check_int_a2f(in_type, out_type) @@ -241,7 +242,7 @@ def test_scaling_in_abstract(): ('float', 'uint'), ('complex', 'int'), ('complex', 'uint'), - ): + ): for in_type in np.sctypes[category0]: for out_type in np.sctypes[category1]: with suppress_warnings(): # overflow @@ -260,7 +261,7 @@ def check_int_a2f(in_type, out_type): if DEBUG: print('Hit PPC max -> inf bug; skip in_type %s' % in_type) return - else: # Funny behavior with complex256 + else: # Funny behavior with complex256 data = np.zeros((2,), in_type) data[0] = this_min + 0j data[1] = this_max + 0j diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index 4a6ee7ff52..1544a53a97 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ 
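The `finite_range` table above compresses many cases; the two headline behaviours restated, assuming `finite_range` from `nibabel.volumeutils`:

```python
import numpy as np
from nibabel.volumeutils import finite_range

arr = np.array([[np.nan, -1, 2], [-2, np.nan, np.inf]])
assert finite_range(arr) == (-2, 2)       # nans and infs are skipped
empty = finite_range(np.array([np.nan]))  # nothing finite at all
assert empty == (np.inf, -np.inf)
```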
-38,8 +38,8 @@ def _proc_stdout(stdout): runner = ScriptRunner( - script_sdir = 'bin', - debug_print_var = 'NIPY_DEBUG_PRINT', + script_sdir='bin', + debug_print_var='NIPY_DEBUG_PRINT', output_processor=_proc_stdout) run_command = runner.run_command @@ -48,7 +48,7 @@ def script_test(func): # Decorator to label test as a script_test func.script_test = True return func -script_test.__test__ = False # It's not a test +script_test.__test__ = False # It's not a test DATA_PATH = abspath(pjoin(dirname(__file__), 'data')) diff --git a/nibabel/tests/test_spaces.py b/nibabel/tests/test_spaces.py index e0d2122751..667059333c 100644 --- a/nibabel/tests/test_spaces.py +++ b/nibabel/tests/test_spaces.py @@ -17,14 +17,13 @@ assert_equal, assert_not_equal) - def assert_all_in(in_shape, in_affine, out_shape, out_affine): slices = tuple(slice(N) for N in in_shape) n_axes = len(in_shape) in_grid = np.mgrid[slices] in_grid = np.rollaxis(in_grid, 0, n_axes + 1) v2v = npl.inv(out_affine).dot(in_affine) - if n_axes < 3: # reduced dimensions case + if n_axes < 3: # reduced dimensions case new_v2v = np.eye(n_axes + 1) new_v2v[:n_axes, :n_axes] = v2v[:n_axes, :n_axes] new_v2v[:n_axes, -1] = v2v[:n_axes, -1] @@ -50,7 +49,7 @@ def test_vox2out_vox(): ((2, 3, 4), np.eye(4), None, (2, 3, 4), np.eye(4)), # Flip first axis ((2, 3, 4), np.diag([-1, 1, 1, 1]), None, - (2, 3, 4), [[1, 0, 0, -1], # axis reversed -> -ve offset + (2, 3, 4), [[1, 0, 0, -1], # axis reversed -> -ve offset [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]), @@ -75,9 +74,9 @@ def test_vox2out_vox(): [0, 0, 0, 1]]), # Less than 3 axes ((2, 3), np.eye(4), None, - (2, 3), np.eye(4)), + (2, 3), np.eye(4)), ((2,), np.eye(4), None, - (2,), np.eye(4)), + (2,), np.eye(4)), # Number of voxel sizes matches length ((2, 3), np.diag([4, 5, 6, 1]), (4, 5), (2, 3), np.diag([4, 5, 1, 1])), @@ -101,9 +100,9 @@ def test_vox2out_vox(): def test_slice2volume(): # Get affine expressing selection of single slice from volume for axis, def_aff in zip((0, 1, 2), ( - [[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]], - [[1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 1]], - [[1, 0, 0], [0, 1, 0], [0, 0, 0], [0, 0, 1]])): + [[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]], + [[1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 1]], + [[1, 0, 0], [0, 1, 0], [0, 0, 0], [0, 0, 1]])): for val in (0, 5, 10): exp_aff = np.array(def_aff) exp_aff[axis, -1] = val diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 919fc9f846..0ae2c4414c 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -39,17 +39,17 @@ def test_header_init(): assert_equal(hdr.get_data_dtype(), np.dtype(np.float64)) assert_equal(hdr.get_data_shape(), (0,)) assert_equal(hdr.get_zooms(), (1.0,)) - hdr = Header(np.float64, shape=(1,2,3)) + hdr = Header(np.float64, shape=(1, 2, 3)) assert_equal(hdr.get_data_dtype(), np.dtype(np.float64)) - assert_equal(hdr.get_data_shape(), (1,2,3)) + assert_equal(hdr.get_data_shape(), (1, 2, 3)) assert_equal(hdr.get_zooms(), (1.0, 1.0, 1.0)) - hdr = Header(np.float64, shape=(1,2,3), zooms=None) + hdr = Header(np.float64, shape=(1, 2, 3), zooms=None) assert_equal(hdr.get_data_dtype(), np.dtype(np.float64)) - assert_equal(hdr.get_data_shape(), (1,2,3)) + assert_equal(hdr.get_data_shape(), (1, 2, 3)) assert_equal(hdr.get_zooms(), (1.0, 1.0, 1.0)) - hdr = Header(np.float64, shape=(1,2,3), zooms=(3.0, 2.0, 1.0)) + hdr = Header(np.float64, shape=(1, 2, 3), zooms=(3.0, 2.0, 1.0)) assert_equal(hdr.get_data_dtype(), np.dtype(np.float64)) - 
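`assert_all_in` above is the geometric core of the spaces tests; the same check for the flipped-axis case, worked in plain numpy (my own restatement of the helper's logic):

```python
import numpy as np
import numpy.linalg as npl

in_shape, out_shape = (2, 3, 4), (2, 3, 4)
in_aff = np.diag([-1.0, 1, 1, 1])                 # flip first axis
out_aff = np.array([[1, 0, 0, -1],                # reversed axis -> -ve offset
                    [0, 1, 0, 0],
                    [0, 0, 1, 0],
                    [0, 0, 0, 1.]])
v2v = npl.inv(out_aff).dot(in_aff)                # input voxels -> output voxels
grid = np.mgrid[:2, :3, :4].reshape((3, -1))
mapped = v2v[:3, :3].dot(grid) + v2v[:3, 3][:, None]
# every input voxel lands inside the output grid, within half a voxel
assert (mapped.min(axis=1) >= -0.5).all()
assert (mapped.max(axis=1) <= np.array(out_shape) - 0.5).all()
```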
assert_equal(hdr.get_data_shape(), (1,2,3)) + assert_equal(hdr.get_data_shape(), (1, 2, 3)) assert_equal(hdr.get_zooms(), (3.0, 2.0, 1.0)) @@ -60,19 +60,23 @@ def test_from_header(): assert_equal(Header(), empty) empty = Header.from_header(None) assert_equal(Header(), empty) - hdr = Header(np.float64, shape=(1,2,3), zooms=(3.0, 2.0, 1.0)) + hdr = Header(np.float64, shape=(1, 2, 3), zooms=(3.0, 2.0, 1.0)) copy = Header.from_header(hdr) assert_equal(hdr, copy) assert_false(hdr is copy) + class C(object): + def get_data_dtype(self): return np.dtype('u2') - def get_data_shape(self): return (5,4,3) + + def get_data_shape(self): return (5, 4, 3) + def get_zooms(self): return (10.0, 9.0, 8.0) converted = Header.from_header(C()) assert_true(isinstance(converted, Header)) assert_equal(converted.get_data_dtype(), np.dtype('u2')) - assert_equal(converted.get_data_shape(), (5,4,3)) - assert_equal(converted.get_zooms(), (10.0,9.0,8.0)) + assert_equal(converted.get_data_shape(), (5, 4, 3)) + assert_equal(converted.get_zooms(), (10.0, 9.0, 8.0)) def test_eq(): @@ -81,25 +85,25 @@ def test_eq(): assert_equal(hdr, other) other = Header('u2') assert_not_equal(hdr, other) - other = Header(shape=(1,2,3)) + other = Header(shape=(1, 2, 3)) assert_not_equal(hdr, other) - hdr = Header(shape=(1,2)) - other = Header(shape=(1,2)) + hdr = Header(shape=(1, 2)) + other = Header(shape=(1, 2)) assert_equal(hdr, other) - other = Header(shape=(1,2), zooms=(2.0,3.0)) + other = Header(shape=(1, 2), zooms=(2.0, 3.0)) assert_not_equal(hdr, other) def test_copy(): # test that copy makes independent copy - hdr = Header(np.float64, shape=(1,2,3), zooms=(3.0, 2.0, 1.0)) + hdr = Header(np.float64, shape=(1, 2, 3), zooms=(3.0, 2.0, 1.0)) hdr_copy = hdr.copy() - hdr.set_data_shape((4,5,6)) - assert_equal(hdr.get_data_shape(), (4,5,6)) - assert_equal(hdr_copy.get_data_shape(), (1,2,3)) - hdr.set_zooms((4,5,6)) - assert_equal(hdr.get_zooms(), (4,5,6)) - assert_equal(hdr_copy.get_zooms(), (3,2,1)) + hdr.set_data_shape((4, 5, 6)) + assert_equal(hdr.get_data_shape(), (4, 5, 6)) + assert_equal(hdr_copy.get_data_shape(), (1, 2, 3)) + hdr.set_zooms((4, 5, 6)) + assert_equal(hdr.get_zooms(), (4, 5, 6)) + assert_equal(hdr_copy.get_zooms(), (3, 2, 1)) hdr.set_data_dtype(np.uint8) assert_equal(hdr.get_data_dtype(), np.dtype(np.uint8)) assert_equal(hdr_copy.get_data_dtype(), np.dtype(np.float64)) @@ -108,16 +112,16 @@ def test_copy(): def test_shape_zooms(): hdr = Header() hdr.set_data_shape((1, 2, 3)) - assert_equal(hdr.get_data_shape(), (1,2,3)) - assert_equal(hdr.get_zooms(), (1.0,1.0,1.0)) + assert_equal(hdr.get_data_shape(), (1, 2, 3)) + assert_equal(hdr.get_zooms(), (1.0, 1.0, 1.0)) hdr.set_zooms((4, 3, 2)) - assert_equal(hdr.get_zooms(), (4.0,3.0,2.0)) + assert_equal(hdr.get_zooms(), (4.0, 3.0, 2.0)) hdr.set_data_shape((1, 2)) - assert_equal(hdr.get_data_shape(), (1,2)) - assert_equal(hdr.get_zooms(), (4.0,3.0)) + assert_equal(hdr.get_data_shape(), (1, 2)) + assert_equal(hdr.get_zooms(), (4.0, 3.0)) hdr.set_data_shape((1, 2, 3)) - assert_equal(hdr.get_data_shape(), (1,2,3)) - assert_equal(hdr.get_zooms(), (4.0,3.0,1.0)) + assert_equal(hdr.get_data_shape(), (1, 2, 3)) + assert_equal(hdr.get_zooms(), (4.0, 3.0, 1.0)) # null shape is (0,) hdr.set_data_shape(()) assert_equal(hdr.get_data_shape(), (0,)) @@ -125,12 +129,12 @@ def test_shape_zooms(): # zooms of wrong lengths raise error assert_raises(HeaderDataError, hdr.set_zooms, (4.0, 3.0)) assert_raises(HeaderDataError, - hdr.set_zooms, - (4.0, 3.0, 2.0, 1.0)) + hdr.set_zooms, + (4.0, 3.0, 
2.0, 1.0)) # as do negative zooms assert_raises(HeaderDataError, - hdr.set_zooms, - (4.0, 3.0, -2.0)) + hdr.set_zooms, + (4.0, 3.0, -2.0)) def test_data_dtype(): @@ -143,18 +147,18 @@ def test_data_dtype(): def test_affine(): - hdr = Header(np.float64, shape=(1,2,3), zooms=(3.0, 2.0, 1.0)) + hdr = Header(np.float64, shape=(1, 2, 3), zooms=(3.0, 2.0, 1.0)) assert_array_almost_equal(hdr.get_best_affine(), - [[-3.0,0,0,0], - [0,2,0,-1], - [0,0,1,-1], - [0,0,0,1]]) + [[-3.0, 0, 0, 0], + [0, 2, 0, -1], + [0, 0, 1, -1], + [0, 0, 0, 1]]) hdr.default_x_flip = False assert_array_almost_equal(hdr.get_best_affine(), - [[3.0,0,0,0], - [0,2,0,-1], - [0,0,1,-1], - [0,0,0,1]]) + [[3.0, 0, 0, 0], + [0, 2, 0, -1], + [0, 0, 1, -1], + [0, 0, 0, 1]]) assert_array_equal(hdr.get_base_affine(), hdr.get_best_affine()) @@ -163,9 +167,9 @@ def test_read_data(): class CHeader(SpatialHeader): data_layout = 'C' for klass, order in ((SpatialHeader, 'F'), (CHeader, 'C')): - hdr = klass(np.int32, shape=(1,2,3), zooms=(3.0, 2.0, 1.0)) + hdr = klass(np.int32, shape=(1, 2, 3), zooms=(3.0, 2.0, 1.0)) fobj = BytesIO() - data = np.arange(6).reshape((1,2,3)) + data = np.arange(6).reshape((1, 2, 3)) hdr.data_to_fileobj(data, fobj) assert_equal(fobj.getvalue(), data.astype(np.int32).tostring(order=order)) @@ -205,7 +209,7 @@ def test_isolation(self): aff = np.eye(4) img = img_klass(arr, aff) assert_array_equal(img.affine, aff) - aff[0,0] = 99 + aff[0, 0] = 99 assert_false(np.all(img.affine == aff)) # header, created by image creation ihdr = img.header @@ -257,7 +261,7 @@ def test_data_default(self): # is None, and that unsupported dtypes raise an error img_klass = self.image_class hdr_klass = self.image_class.header_class - data = np.arange(24, dtype=np.int32).reshape((2,3,4)) + data = np.arange(24, dtype=np.int32).reshape((2, 3, 4)) affine = np.eye(4) img = img_klass(data, affine) self.check_dtypes(data.dtype, img.get_data_dtype()) @@ -274,8 +278,8 @@ def test_data_shape(self): arr = np.arange(4, dtype=np.int16) img = img_klass(arr, np.eye(4)) assert_equal(img.shape, (4,)) - img = img_klass(np.zeros((2,3,4), dtype=np.float32), np.eye(4)) - assert_equal(img.shape, (2,3,4)) + img = img_klass(np.zeros((2, 3, 4), dtype=np.float32), np.eye(4)) + assert_equal(img.shape, (2, 3, 4)) def test_str(self): # Check something comes back from string representation @@ -286,7 +290,7 @@ def test_str(self): img = img_klass(arr, np.eye(4)) assert_true(len(str(img)) > 0) assert_equal(img.shape, (5,)) - img = img_klass(np.zeros((2,3,4), dtype=np.int16), np.eye(4)) + img = img_klass(np.zeros((2, 3, 4), dtype=np.int16), np.eye(4)) assert_true(len(str(img)) > 0) def test_get_shape(self): @@ -298,8 +302,8 @@ def test_get_shape(self): img = img_klass(np.arange(1, dtype=np.int16), np.eye(4)) with suppress_warnings(): assert_equal(img.get_shape(), (1,)) - img = img_klass(np.zeros((2,3,4), np.int16), np.eye(4)) - assert_equal(img.get_shape(), (2,3,4)) + img = img_klass(np.zeros((2, 3, 4), np.int16), np.eye(4)) + assert_equal(img.get_shape(), (2, 3, 4)) def test_get_data(self): # Test array image and proxy image interface @@ -359,14 +363,14 @@ def test_load_mmap(self): (top_load, fname), (img_klass.from_file_map, file_map)): for mmap, expected_mode in ( - # mmap value, expected memmap mode - # mmap=None -> no mmap value - # expected mode=None -> no memmap returned - (None, 'c'), - (True, 'c'), - ('c', 'c'), - ('r', 'r'), - (False, None)): + # mmap value, expected memmap mode + # mmap=None -> no mmap value + # expected mode=None -> no memmap returned + (None, 
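The `Header` behaviours being reformatted above follow one rule worth stating: zooms track the shape's length, truncating when the shape shrinks and padding with 1.0 when it grows. A sketch, assuming the `Header` class the test module imports (`nibabel.spatialimages.Header` at the time of this patch):

```python
import numpy as np
from nibabel.spatialimages import Header  # SpatialHeader in later releases

hdr = Header(np.float64, shape=(1, 2, 3), zooms=(3.0, 2.0, 1.0))
assert hdr.get_zooms() == (3.0, 2.0, 1.0)
hdr.set_data_shape((1, 2))            # shrinking the shape trims the zooms
assert hdr.get_zooms() == (3.0, 2.0)
hdr.set_data_shape((1, 2, 3))         # growing it pads with 1.0
assert hdr.get_zooms() == (3.0, 2.0, 1.0)
```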
'c'), + (True, 'c'), + ('c', 'c'), + ('r', 'r'), + (False, None)): kwargs = {} if mmap is not None: kwargs['mmap'] = mmap diff --git a/nibabel/tests/test_spm2analyze.py b/nibabel/tests/test_spm2analyze.py index 2dc7d0f2da..e3d7d92497 100644 --- a/nibabel/tests/test_spm2analyze.py +++ b/nibabel/tests/test_spm2analyze.py @@ -27,20 +27,20 @@ def test_slope_inter(self): hdr = self.header_class() assert_equal(hdr.get_slope_inter(), (1.0, 0.0)) for in_tup, exp_err, out_tup, raw_slope in ( - ((2.0,), None, (2.0, 0.), 2.), - ((None,), None, (None, None), np.nan), - ((1.0, None), None, (1.0, 0.), 1.), - # non zero intercept causes error - ((None, 1.1), HeaderTypeError, (None, None), np.nan), - ((2.0, 1.1), HeaderTypeError, (None, None), 2.), - # null scalings - ((0.0, None), HeaderDataError, (None, None), 0.), - ((np.nan, np.nan), None, (None, None), np.nan), - ((np.nan, None), None, (None, None), np.nan), - ((None, np.nan), None, (None, None), np.nan), - ((np.inf, None), HeaderDataError, (None, None), np.inf), - ((-np.inf, None), HeaderDataError, (None, None), -np.inf), - ((None, 0.0), None, (None, None), np.nan)): + ((2.0,), None, (2.0, 0.), 2.), + ((None,), None, (None, None), np.nan), + ((1.0, None), None, (1.0, 0.), 1.), + # non zero intercept causes error + ((None, 1.1), HeaderTypeError, (None, None), np.nan), + ((2.0, 1.1), HeaderTypeError, (None, None), 2.), + # null scalings + ((0.0, None), HeaderDataError, (None, None), 0.), + ((np.nan, np.nan), None, (None, None), np.nan), + ((np.nan, None), None, (None, None), np.nan), + ((None, np.nan), None, (None, None), np.nan), + ((np.inf, None), HeaderDataError, (None, None), np.inf), + ((-np.inf, None), HeaderDataError, (None, None), -np.inf), + ((None, 0.0), None, (None, None), np.nan)): hdr = self.header_class() if not exp_err is None: assert_raises(exp_err, hdr.set_slope_inter, *in_tup) diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py index 86b77f14ed..f810fb659e 100644 --- a/nibabel/tests/test_spm99analyze.py +++ b/nibabel/tests/test_spm99analyze.py @@ -51,10 +51,10 @@ class HeaderScalingMixin(object): def test_data_scaling(self): hdr = self.header_class() - hdr.set_data_shape((1,2,3)) + hdr.set_data_shape((1, 2, 3)) hdr.set_data_dtype(np.int16) S3 = BytesIO() - data = np.arange(6, dtype=np.float64).reshape((1,2,3)) + data = np.arange(6, dtype=np.float64).reshape((1, 2, 3)) # This uses scaling hdr.data_to_fileobj(data, S3) data_back = hdr.data_from_fileobj(S3) @@ -90,12 +90,12 @@ def test_big_scaling(self): # Test that upcasting works for huge scalefactors # See tests for apply_read_scaling in test_utils hdr = self.header_class() - hdr.set_data_shape((1,1,1)) + hdr.set_data_shape((1, 1, 1)) hdr.set_data_dtype(np.int16) sio = BytesIO() dtt = np.float32 # This will generate a huge scalefactor - data = np.array([type_info(dtt)['max']], dtype=dtt)[:,None, None] + data = np.array([type_info(dtt)['max']], dtype=dtt)[:, None, None] hdr.data_to_fileobj(data, sio) data_back = hdr.data_from_fileobj(sio) assert_true(np.allclose(data, data_back)) @@ -104,20 +104,20 @@ def test_slope_inter(self): hdr = self.header_class() assert_equal(hdr.get_slope_inter(), (1.0, None)) for in_tup, exp_err, out_tup, raw_slope in ( - ((2.0,), None, (2.0, None), 2.), - ((None,), None, (None, None), np.nan), - ((1.0, None), None, (1.0, None), 1.), - # non zero intercept causes error - ((None, 1.1), HeaderTypeError, (None, None), np.nan), - ((2.0, 1.1), HeaderTypeError, (None, None), 2.), - # null scalings - ((0.0, None), HeaderDataError, 
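The parameter table above encodes the Analyze-family scaling contract; exercised directly, assuming `Spm2AnalyzeHeader` (the `header_class` of this test case):

```python
from nibabel.spm2analyze import Spm2AnalyzeHeader
from nibabel.spatialimages import HeaderTypeError

hdr = Spm2AnalyzeHeader()
hdr.set_slope_inter(2.0)                  # slope alone is accepted
assert hdr.get_slope_inter() == (2.0, 0.0)
try:
    hdr.set_slope_inter(2.0, 1.1)         # non-zero intercept is rejected
except HeaderTypeError:
    pass
else:
    raise AssertionError('expected HeaderTypeError')
```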
(None, None), 0.), - ((np.nan, np.nan), None, (None, None), np.nan), - ((np.nan, None), None, (None, None), np.nan), - ((None, np.nan), None, (None, None), np.nan), - ((np.inf, None), HeaderDataError, (None, None), np.inf), - ((-np.inf, None), HeaderDataError, (None, None), -np.inf), - ((None, 0.0), None, (None, None), np.nan)): + ((2.0,), None, (2.0, None), 2.), + ((None,), None, (None, None), np.nan), + ((1.0, None), None, (1.0, None), 1.), + # non zero intercept causes error + ((None, 1.1), HeaderTypeError, (None, None), np.nan), + ((2.0, 1.1), HeaderTypeError, (None, None), 2.), + # null scalings + ((0.0, None), HeaderDataError, (None, None), 0.), + ((np.nan, np.nan), None, (None, None), np.nan), + ((np.nan, None), None, (None, None), np.nan), + ((None, np.nan), None, (None, None), np.nan), + ((np.inf, None), HeaderDataError, (None, None), np.inf), + ((-np.inf, None), HeaderDataError, (None, None), -np.inf), + ((None, 0.0), None, (None, None), np.nan)): hdr = self.header_class() if not exp_err is None: assert_raises(exp_err, hdr.set_slope_inter, *in_tup) @@ -136,19 +136,19 @@ def test_origin_checks(self): HC = self.header_class # origin hdr = HC() - hdr.data_shape = [1,1,1] - hdr['origin'][0] = 101 # severity 20 + hdr.data_shape = [1, 1, 1] + hdr['origin'][0] = 101 # severity 20 fhdr, message, raiser = self.log_chk(hdr, 20) assert_equal(fhdr, hdr) assert_equal(message, 'very large origin values ' - 'relative to dims; leaving as set, ' - 'ignoring for affine') + 'relative to dims; leaving as set, ' + 'ignoring for affine') assert_raises(*raiser) # diagnose binary block dxer = self.header_class.diagnose_binaryblock assert_equal(dxer(hdr.binaryblock), - 'very large origin values ' - 'relative to dims') + 'very large origin values ' + 'relative to dims') class ImageScalingMixin(object): @@ -325,8 +325,8 @@ def test_no_scaling(self): slope = 2 inter = 10 if hdr.has_data_intercept else 0 for in_dtype, out_dtype in itertools.product( - FLOAT_TYPES + IUINT_TYPES, - supported_types): + FLOAT_TYPES + IUINT_TYPES, + supported_types): # Need to check complex scaling mn_in, mx_in = _dt_min_max(in_dtype) arr = np.array([mn_in, -1, 0, 1, 10, mx_in], dtype=in_dtype) @@ -389,7 +389,7 @@ def test_nan2zero_range_ok(self): img_class = self.image_class arr = np.arange(24, dtype=np.float32).reshape((2, 3, 4)) arr[0, 0, 0] = np.nan - arr[1, 0, 0] = 256 # to push outside uint8 range + arr[1, 0, 0] = 256 # to push outside uint8 range img = img_class(arr, np.eye(4)) rt_img = bytesio_round_trip(img) assert_array_equal(rt_img.get_data(), arr) @@ -443,8 +443,8 @@ class TestSpm99AnalyzeImage(test_analyze.TestAnalyzeImage, ImageScalingMixin): def test_mat_read(self): # Test mat file reading and writing for the SPM analyze types img_klass = self.image_class - arr = np.arange(24, dtype=np.int32).reshape((2,3,4)) - aff = np.diag([2,3,4,1]) # no LR flip in affine + arr = np.arange(24, dtype=np.int32).reshape((2, 3, 4)) + aff = np.diag([2, 3, 4, 1]) # no LR flip in affine img = img_klass(arr, aff) fm = img.file_map for key, value in fm.items(): @@ -463,34 +463,34 @@ def test_mat_read(self): mats = loadmat(mat_fileobj) assert_true('M' in mats and 'mat' in mats) from_111 = np.eye(4) - from_111[:3,3] = -1 + from_111[:3, 3] = -1 to_111 = np.eye(4) - to_111[:3,3] = 1 + to_111[:3, 3] = 1 assert_array_equal(mats['mat'], np.dot(aff, from_111)) # The M matrix does not include flips, so if we only have the M matrix # in the mat file, and we have default flipping, the mat resulting # should have a flip. 
The 'mat' matrix does include flips and so # should be unaffected by the flipping. If both are present we prefer # the the 'mat' matrix. - assert_true(img.header.default_x_flip) # check the default - flipper = np.diag([-1,1,1,1]) + assert_true(img.header.default_x_flip) # check the default + flipper = np.diag([-1, 1, 1, 1]) assert_array_equal(mats['M'], np.dot(aff, np.dot(flipper, from_111))) mat_fileobj.seek(0) savemat(mat_fileobj, - dict(M=np.diag([3,4,5,1]), mat=np.diag([6,7,8,1]))) + dict(M=np.diag([3, 4, 5, 1]), mat=np.diag([6, 7, 8, 1]))) # Check we are preferring the 'mat' matrix r_img = img_klass.from_file_map(fm) assert_array_equal(r_img.get_data(), arr) assert_array_equal(r_img.affine, - np.dot(np.diag([6,7,8,1]), to_111)) + np.dot(np.diag([6, 7, 8, 1]), to_111)) # But will use M if present mat_fileobj.seek(0) mat_fileobj.truncate(0) - savemat(mat_fileobj, dict(M=np.diag([3,4,5,1]))) + savemat(mat_fileobj, dict(M=np.diag([3, 4, 5, 1]))) r_img = img_klass.from_file_map(fm) assert_array_equal(r_img.get_data(), arr) assert_array_equal(r_img.affine, - np.dot(np.diag([3,4,5,1]), np.dot(flipper, to_111))) + np.dot(np.diag([3, 4, 5, 1]), np.dot(flipper, to_111))) def test_none_affine(self): # Allow for possibility of no affine resulting in nothing written into @@ -498,7 +498,7 @@ def test_none_affine(self): # it's a fileobj, we get an empty fileobj img_klass = self.image_class # With a None affine - no matfile written - img = img_klass(np.zeros((2,3,4)), None) + img = img_klass(np.zeros((2, 3, 4)), None) aff = img.header.get_best_affine() # Save / reload using bytes IO objects for key, value in img.file_map.items(): @@ -516,30 +516,30 @@ def test_origin_affine(): hdr.set_zooms((3, 2, 1)) assert_true(hdr.default_x_flip) assert_array_almost_equal( - hdr.get_origin_affine(), # from center of image - [[-3., 0., 0., 3.], - [ 0., 2., 0., -4.], - [ 0., 0., 1., -3.], - [ 0., 0., 0., 1.]]) - hdr['origin'][:3] = [3,4,5] + hdr.get_origin_affine(), # from center of image + [[-3., 0., 0., 3.], + [0., 2., 0., -4.], + [0., 0., 1., -3.], + [0., 0., 0., 1.]]) + hdr['origin'][:3] = [3, 4, 5] assert_array_almost_equal( - hdr.get_origin_affine(), # using origin - [[-3., 0., 0., 6.], - [ 0., 2., 0., -6.], - [ 0., 0., 1., -4.], - [ 0., 0., 0., 1.]]) - hdr['origin'] = 0 # unset origin + hdr.get_origin_affine(), # using origin + [[-3., 0., 0., 6.], + [0., 2., 0., -6.], + [0., 0., 1., -4.], + [0., 0., 0., 1.]]) + hdr['origin'] = 0 # unset origin hdr.set_data_shape((3, 5)) assert_array_almost_equal( hdr.get_origin_affine(), - [[-3., 0., 0., 3.], - [ 0., 2., 0., -4.], - [ 0., 0., 1., -0.], - [ 0., 0., 0., 1.]]) + [[-3., 0., 0., 3.], + [0., 2., 0., -4.], + [0., 0., 1., -0.], + [0., 0., 0., 1.]]) hdr.set_data_shape((3, 5, 7)) assert_array_almost_equal( - hdr.get_origin_affine(), # from center of image - [[-3., 0., 0., 3.], - [ 0., 2., 0., -4.], - [ 0., 0., 1., -3.], - [ 0., 0., 0., 1.]]) + hdr.get_origin_affine(), # from center of image + [[-3., 0., 0., 3.], + [0., 2., 0., -4.], + [0., 0., 1., -3.], + [0., 0., 0., 1.]]) diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index c59e921a03..7a4b3d81ce 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -117,10 +117,11 @@ def test_warn_error(): n_warns = len(warnings.filters) with error_warnings(): assert_raises(UserWarning, warnings.warn, 'A test') - with error_warnings() as w: # w not used for anything + with error_warnings() as w: # w not used for anything assert_raises(UserWarning, warnings.warn, 'A test') 
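The `test_origin_affine` expectations above are easy to verify by hand: with the origin unset, the centre voxel `(shape - 1) / 2` maps to zero under the zooms, with the default x flip. The translation column follows directly:

```python
import numpy as np

shape = np.array([3, 5, 7])
zooms = np.array([3.0, 2.0, 1.0])
signs = np.array([-1, 1, 1])                   # default x flip
offset = -signs * zooms * (shape - 1) / 2.0
assert np.allclose(offset, [3.0, -4.0, -3.0])  # last column of the affine
```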
assert_equal(n_warns, len(warnings.filters)) # Check other errors are propagated + def f(): with error_warnings(): raise ValueError('An error') @@ -133,11 +134,12 @@ def test_warn_ignore(): with suppress_warnings(): warnings.warn('Here is a warning, you will not see it') warnings.warn('Nor this one', DeprecationWarning) - with suppress_warnings() as w: # w not used + with suppress_warnings() as w: # w not used warnings.warn('Here is a warning, you will not see it') warnings.warn('Nor this one', DeprecationWarning) assert_equal(n_warns, len(warnings.filters)) # Check other errors are propagated + def f(): with suppress_warnings(): raise ValueError('An error') diff --git a/nibabel/tests/test_tmpdirs.py b/nibabel/tests/test_tmpdirs.py index 82f1dcb464..48fa5885a9 100644 --- a/nibabel/tests/test_tmpdirs.py +++ b/nibabel/tests/test_tmpdirs.py @@ -11,6 +11,7 @@ MY_PATH = abspath(__file__) MY_DIR = dirname(MY_PATH) + def test_given_directory(): # Test InGivenDirectory cwd = getcwd() diff --git a/nibabel/tests/test_trackvis.py b/nibabel/tests/test_trackvis.py index 94e5bf93bb..1a240957ab 100644 --- a/nibabel/tests/test_trackvis.py +++ b/nibabel/tests/test_trackvis.py @@ -20,26 +20,30 @@ def test_write(): out_f = BytesIO() tv.write(out_f, [], {}) assert_equal(out_f.getvalue(), tv.empty_header().tostring()) - out_f.truncate(0); out_f.seek(0) + out_f.truncate(0) + out_f.seek(0) # Write something not-default - tv.write(out_f, [], {'id_string':'TRACKb'}) + tv.write(out_f, [], {'id_string': 'TRACKb'}) # read it back out_f.seek(0) streams, hdr = tv.read(out_f) assert_equal(hdr['id_string'], b'TRACKb') # check that we can pass none for the header - out_f.truncate(0); out_f.seek(0) + out_f.truncate(0) + out_f.seek(0) tv.write(out_f, []) - out_f.truncate(0); out_f.seek(0) + out_f.truncate(0) + out_f.seek(0) tv.write(out_f, [], None) # check that we check input values - out_f.truncate(0); out_f.seek(0) + out_f.truncate(0) + out_f.seek(0) assert_raises(tv.HeaderError, - tv.write, out_f, [],{'id_string':'not OK'}) + tv.write, out_f, [], {'id_string': 'not OK'}) assert_raises(tv.HeaderError, - tv.write, out_f, [],{'version': 3}) + tv.write, out_f, [], {'version': 3}) assert_raises(tv.HeaderError, - tv.write, out_f, [],{'hdr_size': 0}) + tv.write, out_f, [], {'hdr_size': 0}) def test_write_scalars_props(): @@ -47,8 +51,8 @@ def test_write_scalars_props(): N = 6 M = 2 P = 4 - points = np.arange(N*3).reshape((N,3)) - scalars = np.arange(N*M).reshape((N,M)) + 100 + points = np.arange(N * 3).reshape((N, 3)) + scalars = np.arange(N * M).reshape((N, M)) + 100 props = np.arange(P) + 1000 # If scalars not same size for each point, error out_f = BytesIO() @@ -56,11 +60,11 @@ def test_write_scalars_props(): (points, scalars, None)] assert_raises(tv.DataError, tv.write, out_f, streams) out_f.seek(0) - streams = [(points, np.zeros((N,M+1)), None), + streams = [(points, np.zeros((N, M + 1)), None), (points, scalars, None)] assert_raises(tv.DataError, tv.write, out_f, streams) # Or if scalars different N compared to points - bad_scalars = np.zeros((N+1,M)) + bad_scalars = np.zeros((N + 1, M)) out_f.seek(0) streams = [(points, bad_scalars, None), (points, bad_scalars, None)] @@ -71,7 +75,7 @@ def test_write_scalars_props(): (points, scalars, props)] assert_raises(tv.DataError, tv.write, out_f, streams) out_f.seek(0) - streams = [(points, scalars, np.zeros((P+1,))), + streams = [(points, scalars, np.zeros((P + 1,))), (points, scalars, props)] assert_raises(tv.DataError, tv.write, out_f, streams) # If all is OK, then we get back 
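The two context managers tested above are small but easy to misremember; their contract in a few lines (both live in `nibabel.testing`):

```python
import warnings
from nibabel.testing import error_warnings, suppress_warnings

with suppress_warnings():
    warnings.warn('hidden entirely')      # filtered, never shown
try:
    with error_warnings():
        warnings.warn('promoted')         # raised as an exception
except UserWarning:
    pass
```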
what we put in @@ -125,8 +129,8 @@ def streamlist_equal(streamlist1, streamlist2): def test_round_trip(): out_f = BytesIO() - xyz0 = np.tile(np.arange(5).reshape(5,1), (1, 3)) - xyz1 = np.tile(np.arange(5).reshape(5,1) + 10, (1, 3)) + xyz0 = np.tile(np.arange(5).reshape(5, 1), (1, 3)) + xyz1 = np.tile(np.arange(5).reshape(5, 1) + 10, (1, 3)) streams = [(xyz0, None, None), (xyz1, None, None)] tv.write(out_f, streams, {}) out_f.seek(0) @@ -134,9 +138,9 @@ def test_round_trip(): assert_true(streamlist_equal(streams, streams2)) # test that we can write in different endianness and get back same result, # for versions 1, 2 and not-specified - for in_dict, back_version in (({},2), - ({'version':2}, 2), - ({'version':1}, 1)): + for in_dict, back_version in (({}, 2), + ({'version': 2}, 2), + ({'version': 1}, 1)): for endian_code in (native_code, swapped_code): out_f.seek(0) tv.write(out_f, streams, in_dict, endian_code) @@ -167,6 +171,7 @@ def test_round_trip(): def test_points_processing(): # We may need to process points if they are in voxel or mm format out_f = BytesIO() + def _rt(streams, hdr, points_space): # run round trip through IO object out_f.seek(0) @@ -176,15 +181,15 @@ def _rt(streams, hdr, points_space): out_f.seek(0) return res0, tv.read(out_f, points_space=points_space) n_pts = 5 - ijk0 = np.arange(n_pts * 3).reshape((n_pts,3)) / 2.0 + ijk0 = np.arange(n_pts * 3).reshape((n_pts, 3)) / 2.0 ijk1 = ijk0 + 20 # Check with and without some scalars for scalars in ((None, None), (np.arange(n_pts)[:, None], np.arange(n_pts)[:, None] + 99)): vx_streams = [(ijk0, scalars[0], None), (ijk1, scalars[1], None)] - vxmm_streams = [(ijk0 * [[2,3,4]], scalars[0], None), - (ijk1 * [[2,3,4]], scalars[1], None)] + vxmm_streams = [(ijk0 * [[2, 3, 4]], scalars[0], None), + (ijk1 * [[2, 3, 4]], scalars[1], None)] # voxmm is the default. In this case we don't do anything to the # points, and we let the header pass through without further checks (raw_streams, hdr), (proc_streams, _) = _rt(vxmm_streams, {}, None) @@ -195,19 +200,19 @@ def _rt(streams, hdr, points_space): assert_true(streamlist_equal(vxmm_streams, proc_streams)) # with 'voxels' as input, check for not all voxel_size == 0, warn if any # voxel_size == 0 - for hdr in ( # these cause read / write errors - # empty header has 0 voxel sizes - {}, - {'voxel_size': [0,0,0]}, # the default - {'voxel_size': [-2,3,4]}, # negative not valid - ): + for hdr in ( # these cause read / write errors + # empty header has 0 voxel sizes + {}, + {'voxel_size': [0, 0, 0]}, # the default + {'voxel_size': [-2, 3, 4]}, # negative not valid + ): # Check error on write out_f.seek(0) assert_raises(tv.HeaderError, - tv.write, out_f, vx_streams, hdr, None, 'voxel') + tv.write, out_f, vx_streams, hdr, None, 'voxel') out_f.seek(0) # bypass write error and check read - tv.write(out_f, vxmm_streams, hdr, None, points_space = None) + tv.write(out_f, vxmm_streams, hdr, None, points_space=None) out_f.seek(0) assert_raises(tv.HeaderError, tv.read, out_f, False, 'voxel') # There's a warning for any voxel sizes == 0 @@ -222,56 +227,57 @@ def _rt(streams, hdr, points_space): # Now we try with rasmm points. In this case we need valid voxel_size, # and voxel_order, and vox_to_ras. 
The voxel_order has to match the # vox_to_ras, and so do the voxel sizes - aff = np.diag([2,3,4,1]) + aff = np.diag([2, 3, 4, 1]) # In this case the trk -> vx and vx -> mm invert each other rasmm_streams = vxmm_streams - for hdr in ( # all these cause read and write errors for rasmm + for hdr in ( # all these cause read and write errors for rasmm # Empty header has no valid affine {}, # Error if ras_to_mm not defined (as in version 1) - {'voxel_size': [2, 3, 4], 'voxel_order': 'RAS', 'version':1}, + {'voxel_size': [2, 3, 4], 'voxel_order': 'RAS', 'version': 1}, # or it's all zero {'voxel_size': [2, 3, 4], 'voxel_order': 'RAS', - 'vox_to_ras': np.zeros((4,4))}, + 'vox_to_ras': np.zeros((4, 4))}, # as it is by default {'voxel_size': [2, 3, 4], 'voxel_order': 'RAS'}, # or the voxel_size doesn't match the affine {'voxel_size': [2, 2, 4], 'voxel_order': 'RAS', - 'vox_to_ras': aff}, + 'vox_to_ras': aff}, # or the voxel_order doesn't match the affine {'voxel_size': [2, 3, 4], 'voxel_order': 'LAS', - 'vox_to_ras': aff}, - ): + 'vox_to_ras': aff}, + ): # Check error on write out_f.seek(0) assert_raises(tv.HeaderError, - tv.write, out_f, rasmm_streams, hdr, None, 'rasmm') + tv.write, out_f, rasmm_streams, hdr, None, 'rasmm') out_f.seek(0) # bypass write error and check read - tv.write(out_f, vxmm_streams, hdr, None, points_space = None) + tv.write(out_f, vxmm_streams, hdr, None, points_space=None) out_f.seek(0) assert_raises(tv.HeaderError, tv.read, out_f, False, 'rasmm') # This should be OK hdr = {'voxel_size': [2, 3, 4], 'voxel_order': 'RAS', - 'vox_to_ras': aff} + 'vox_to_ras': aff} (raw_streams, hdr), (proc_streams, _) = _rt(rasmm_streams, hdr, 'rasmm') assert_true(streamlist_equal(vxmm_streams, raw_streams)) assert_true(streamlist_equal(rasmm_streams, proc_streams)) # More complex test to check matrix orientation fancy_affine = np.array([[0., -2, 0, 10], - [3, 0, 0, 20], - [0, 0, 4, 30], - [0, 0, 0, 1]]) + [3, 0, 0, 20], + [0, 0, 4, 30], + [0, 0, 0, 1]]) hdr = {'voxel_size': [3, 2, 4], 'voxel_order': 'ALS', - 'vox_to_ras': fancy_affine} - def f(pts): # from vx to mm - pts = pts[:,[1,0,2]] * [[-2,3,4]] # apply zooms / reorder - return pts + [[10,20,30]] # apply translations + 'vox_to_ras': fancy_affine} + + def f(pts): # from vx to mm + pts = pts[:, [1, 0, 2]] * [[-2, 3, 4]] # apply zooms / reorder + return pts + [[10, 20, 30]] # apply translations xyz0, xyz1 = f(ijk0), f(ijk1) fancy_rasmm_streams = [(xyz0, scalars[0], None), (xyz1, scalars[1], None)] - fancy_vxmm_streams = [(ijk0 * [[3,2,4]], scalars[0], None), - (ijk1 * [[3,2,4]], scalars[1], None)] + fancy_vxmm_streams = [(ijk0 * [[3, 2, 4]], scalars[0], None), + (ijk1 * [[3, 2, 4]], scalars[1], None)] (raw_streams, hdr), (proc_streams, _) = _rt( fancy_rasmm_streams, hdr, 'rasmm') assert_true(streamlist_equal(fancy_vxmm_streams, raw_streams)) @@ -289,7 +295,7 @@ def test__check_hdr_points_space(): # Input not in (None, 'voxmm', 'voxels', 'rasmm') - error # voxels means check voxel sizes present and not all 0. 
hdr = tv.empty_header() - assert_array_equal(hdr['voxel_size'], [0,0,0]) + assert_array_equal(hdr['voxel_size'], [0, 0, 0]) assert_raises(tv.HeaderError, tv._check_hdr_points_space, hdr, 'voxel') # Negative voxel size gives error - because it is not what trackvis does, @@ -317,19 +323,19 @@ def test__check_hdr_points_space(): tv._check_hdr_points_space, hdr, 'rasmm') # nearly an affine, but 0 at position 3,3 - means not recorded in trackvis # standard - hdr['vox_to_ras'] = np.diag([2,3,4,0]) + hdr['vox_to_ras'] = np.diag([2, 3, 4, 0]) assert_raises(tv.HeaderError, tv._check_hdr_points_space, hdr, 'rasmm') # This affine doesn't match RAS voxel order - hdr['vox_to_ras'] = np.diag([-2,3,4,1]) + hdr['vox_to_ras'] = np.diag([-2, 3, 4, 1]) assert_raises(tv.HeaderError, tv._check_hdr_points_space, hdr, 'rasmm') # This affine doesn't match the voxel size - hdr['vox_to_ras'] = np.diag([3,3,4,1]) + hdr['vox_to_ras'] = np.diag([3, 3, 4, 1]) assert_raises(tv.HeaderError, tv._check_hdr_points_space, hdr, 'rasmm') # This should be OK - good_aff = np.diag([2,3,4,1]) + good_aff = np.diag([2, 3, 4, 1]) hdr['vox_to_ras'] = good_aff assert_equal(tv._check_hdr_points_space(hdr, 'rasmm'), None) @@ -339,7 +345,7 @@ def test__check_hdr_points_space(): assert_raises(tv.HeaderError, tv._check_hdr_points_space, hdr, 'rasmm') # this affine does have LPS voxel order - good_lps = np.dot(np.diag([-1,-1,1,1]), good_aff) + good_lps = np.dot(np.diag([-1, -1, 1, 1]), good_aff) hdr['vox_to_ras'] = good_lps assert_equal(tv._check_hdr_points_space(hdr, 'rasmm'), None) @@ -354,9 +360,9 @@ def test_empty_header(): assert_equal(hdr['hdr_size'], 1000) assert_array_equal( hdr['image_orientation_patient'], - [0,0,0,0,0,0]) + [0, 0, 0, 0, 0, 0]) hdr = tv.empty_header(version=2) - assert_array_equal(hdr['vox_to_ras'], np.zeros((4,4))) + assert_array_equal(hdr['vox_to_ras'], np.zeros((4, 4))) hdr_endian = tv.endian_codes[tv.empty_header().dtype.byteorder] assert_equal(hdr_endian, tv.native_code) @@ -372,22 +378,22 @@ def test_get_affine(): old_afh = partial(tv.aff_from_hdr, atleast_v2=False) # default header gives useless affine assert_array_equal(old_afh(hdr), - np.diag([0,0,0,1])) + np.diag([0, 0, 0, 1])) hdr['voxel_size'] = 1 assert_array_equal(old_afh(hdr), - np.diag([0,0,0,1])) + np.diag([0, 0, 0, 1])) # DICOM direction cosines - hdr['image_orientation_patient'] = [1,0,0,0,1,0] + hdr['image_orientation_patient'] = [1, 0, 0, 0, 1, 0] assert_array_equal(old_afh(hdr), - np.diag([-1,-1,1,1])) + np.diag([-1, -1, 1, 1])) # RAS direction cosines - hdr['image_orientation_patient'] = [-1,0,0,0,-1,0] + hdr['image_orientation_patient'] = [-1, 0, 0, 0, -1, 0] assert_array_equal(old_afh(hdr), np.eye(4)) # translations - hdr['origin'] = [1,2,3] + hdr['origin'] = [1, 2, 3] exp_aff = np.eye(4) - exp_aff[:3,3] = [-1,-2,3] + exp_aff[:3, 3] = [-1, -2, 3] assert_array_equal(old_afh(hdr), exp_aff) # check against voxel order. This one works @@ -400,12 +406,12 @@ def test_get_affine(): # This one does work because the routine allows the final dimension to # be flipped to try and match the voxel order hdr['voxel_order'] = 'RAI' - exp_aff = exp_aff * [[1,1,-1,1]] + exp_aff = exp_aff * [[1, 1, -1, 1]] assert_array_equal(old_afh(hdr), exp_aff) # Check round trip case for flipped and unflipped, when positive voxels # only allowed. This checks that the flipping heuristic works. 
flipped_aff = exp_aff - unflipped_aff = exp_aff * [1,1,-1,1] + unflipped_aff = exp_aff * [1, 1, -1, 1] for in_aff, o_codes in ((unflipped_aff, b'RAS'), (flipped_aff, b'RAI')): hdr = tv.empty_header() @@ -421,15 +427,15 @@ def test_get_affine(): # now use the easier vox_to_ras field hdr = tv.empty_header() aff = np.eye(4) - aff[:3,:] = np.arange(12).reshape(3,4) + aff[:3, :] = np.arange(12).reshape(3, 4) hdr['vox_to_ras'] = aff # Pass v2 flag explicitly to avoid warnings assert_array_equal(tv.aff_from_hdr(hdr, atleast_v2=False), aff) # mappings work too d = {'version': 1, - 'voxel_size': np.array([1,2,3]), - 'image_orientation_patient': np.array([1,0,0,0,1,0]), - 'origin': np.array([10,11,12])} + 'voxel_size': np.array([1, 2, 3]), + 'image_orientation_patient': np.array([1, 0, 0, 0, 1, 0]), + 'origin': np.array([10, 11, 12])} aff = tv.aff_from_hdr(d, atleast_v2=False) @@ -438,16 +444,16 @@ def test_aff_to_hdr(): # This is the call to get the old behavior old_a2h = partial(tv.aff_to_hdr, pos_vox=False, set_order=False) hdr = {'version': 1} - affine = np.diag([1,2,3,1]) - affine[:3,3] = [10,11,12] + affine = np.diag([1, 2, 3, 1]) + affine[:3, 3] = [10, 11, 12] old_a2h(affine, hdr) assert_array_almost_equal(tv.aff_from_hdr(hdr, atleast_v2=False), affine) # put flip into affine aff2 = affine.copy() - aff2[:,2] *=-1 + aff2[:, 2] *= -1 old_a2h(aff2, hdr) # Historically we flip the first axis if there is a negative determinant - assert_array_almost_equal(hdr['voxel_size'], [-1,2,3]) + assert_array_almost_equal(hdr['voxel_size'], [-1, 2, 3]) assert_array_almost_equal(tv.aff_from_hdr(hdr, atleast_v2=False), aff2) # Test that default mode raises DeprecationWarning with error_warnings(): @@ -460,7 +466,7 @@ def test_aff_to_hdr(): tv.aff_to_hdr(affine, hdr) assert_array_almost_equal(tv.aff_from_hdr(hdr, atleast_v2=False), affine) # Check pos_vox and order flags - for hdr in ({}, {'version':2}, {'version':1}): + for hdr in ({}, {'version': 2}, {'version': 1}): tv.aff_to_hdr(aff2, hdr, pos_vox=True, set_order=False) assert_array_equal(hdr['voxel_size'], [1, 2, 3]) assert_false('voxel_order' in hdr) @@ -485,27 +491,29 @@ def test_tv_class(): out_f = BytesIO() tvf.to_file(out_f) assert_equal(out_f.getvalue(), tv.empty_header().tostring()) - out_f.truncate(0); out_f.seek(0) + out_f.truncate(0) + out_f.seek(0) # Write something not-default - tvf = tv.TrackvisFile([], {'id_string':'TRACKb'}) + tvf = tv.TrackvisFile([], {'id_string': 'TRACKb'}) tvf.to_file(out_f) # read it back out_f.seek(0) tvf_back = tv.TrackvisFile.from_file(out_f) assert_equal(tvf_back.header['id_string'], b'TRACKb') # check that we check input values - out_f.truncate(0); out_f.seek(0) + out_f.truncate(0) + out_f.seek(0) assert_raises(tv.HeaderError, tv.TrackvisFile, - [],{'id_string':'not OK'}) + [], {'id_string': 'not OK'}) assert_raises(tv.HeaderError, tv.TrackvisFile, - [],{'version': 3}) + [], {'version': 3}) assert_raises(tv.HeaderError, tv.TrackvisFile, - [],{'hdr_size':0}) - affine = np.diag([1,2,3,1]) - affine[:3,3] = [10,11,12] + [], {'hdr_size': 0}) + affine = np.diag([1, 2, 3, 1]) + affine[:3, 3] = [10, 11, 12] # affine methods will raise same warnings and errors as function with error_warnings(): assert_raises(FutureWarning, tvf.set_affine, affine) @@ -526,11 +534,11 @@ def test_tv_class(): def test_tvfile_io(): # Test reading and writing tracks with file class out_f = BytesIO() - ijk0 = np.arange(15).reshape((5,3)) / 2.0 + ijk0 = np.arange(15).reshape((5, 3)) / 2.0 ijk1 = ijk0 + 20 vx_streams = [(ijk0, None, None), (ijk1, 
None, None)] - vxmm_streams = [(ijk0 * [[2,3,4]], None, None), - (ijk1 * [[2,3,4]], None, None)] + vxmm_streams = [(ijk0 * [[2, 3, 4]], None, None), + (ijk1 * [[2, 3, 4]], None, None)] # Roundtrip basic tvf = tv.TrackvisFile(vxmm_streams) tvf.to_file(out_f) @@ -546,7 +554,7 @@ def test_tvfile_io(): assert_raises(tv.HeaderError, tvf.to_file, out_f) out_f.seek(0) # With voxel size, no error, roundtrip works - tvf.header['voxel_size'] = [2,3,4] + tvf.header['voxel_size'] = [2, 3, 4] tvf.to_file(out_f) out_f.seek(0) tvf2 = tv.TrackvisFile.from_file(out_f, points_space='voxel') @@ -555,7 +563,7 @@ def test_tvfile_io(): out_f.seek(0) # Also with affine specified tvf = tv.TrackvisFile(vx_streams, points_space='voxel', - affine=np.diag([2,3,4,1])) + affine=np.diag([2, 3, 4, 1])) tvf.to_file(out_f) out_f.seek(0) tvf2 = tv.TrackvisFile.from_file(out_f, points_space='voxel') @@ -565,9 +573,10 @@ def test_tvfile_io(): [3, 0, 0, 20], [0, 0, 4, 30], [0, 0, 0, 1]]) - def f(pts): # from vx to mm - pts = pts[:,[1,0,2]] * [[-2,3,4]] # apply zooms / reorder - return pts + [[10,20,30]] # apply translations + + def f(pts): # from vx to mm + pts = pts[:, [1, 0, 2]] * [[-2, 3, 4]] # apply zooms / reorder + return pts + [[10, 20, 30]] # apply translations xyz0, xyz1 = f(ijk0), f(ijk1) fancy_rasmm_streams = [(xyz0, None, None), (xyz1, None, None)] # Roundtrip diff --git a/nibabel/tests/test_utils.py b/nibabel/tests/test_utils.py index 77e8f47fd5..24374b3f1f 100644 --- a/nibabel/tests/test_utils.py +++ b/nibabel/tests/test_utils.py @@ -44,7 +44,7 @@ rec2dict, _dt_min_max, _write_data, - ) + ) from ..openers import Opener from ..casting import (floor_log2, type_info, OK_FLOATS, shared_range) @@ -92,8 +92,8 @@ def make_array(n, bytes): fname = 'test.bin' with InTemporaryDirectory(): for n, opener in itertools.product( - (256, 1024, 2560, 25600), - (open, gzip.open, bz2.BZ2File)): + (256, 1024, 2560, 25600), + (open, gzip.open, bz2.BZ2File)): in_arr = np.arange(n, dtype=dtype) # Write array to file fobj_w = opener(fname, 'wb') @@ -123,7 +123,7 @@ def make_array(n, bytes): def test_array_from_file(): - shape = (2,3,4) + shape = (2, 3, 4) dtype = np.dtype(np.float32) in_arr = np.arange(24, dtype=dtype).reshape(shape) # Check on string buffers @@ -151,7 +151,7 @@ def test_array_from_file(): assert_equal(len(arr), 0) # Check error from small file assert_raises(IOError, array_from_file, - shape, dtype, BytesIO()) + shape, dtype, BytesIO()) # check on real file fd, fname = tempfile.mkstemp() with InTemporaryDirectory(): @@ -160,7 +160,7 @@ def test_array_from_file(): # For windows this will raise a WindowsError from mmap, Unices # appear to raise an IOError assert_raises(Exception, array_from_file, - shape, dtype, in_buf) + shape, dtype, in_buf) del in_buf @@ -211,7 +211,7 @@ def buf_chk(in_arr, out_buf, in_buf, offset): instr = b' ' * offset + in_arr.tostring(order='F') out_buf.write(instr) out_buf.flush() - if in_buf is None: # we're using in_buf from out_buf + if in_buf is None: # we're using in_buf from out_buf out_buf.seek(0) in_buf = out_buf arr = array_from_file( @@ -224,7 +224,7 @@ def buf_chk(in_arr, out_buf, in_buf, offset): def test_array_from_file_openers(): # Test array_from_file also works with Opener objects - shape = (2,3,4) + shape = (2, 3, 4) dtype = np.dtype(np.float32) in_arr = np.arange(24, dtype=dtype).reshape(shape) with InTemporaryDirectory(): @@ -232,7 +232,7 @@ def test_array_from_file_openers(): (0, 5, 10)): fname = 'test.bin' + ext with Opener(fname, 'wb') as out_buf: - if offset != 0: # avoid 
https://bugs.python.org/issue16828 + if offset != 0: # avoid https://bugs.python.org/issue16828 out_buf.write(b' ' * offset) out_buf.write(in_arr.tostring(order='F')) with Opener(fname, 'rb') as in_buf: @@ -250,10 +250,10 @@ def test_array_from_file_reread(): fname = 'test.bin' with InTemporaryDirectory(): for shape, opener, dtt, order in itertools.product( - ((64,), (64, 65), (64, 65, 66)), - (open, gzip.open, bz2.BZ2File, BytesIO), - (np.int16, np.float32), - ('F', 'C')): + ((64,), (64, 65), (64, 65, 66)), + (open, gzip.open, bz2.BZ2File, BytesIO), + (np.int16, np.float32), + ('F', 'C')): n_els = np.prod(shape) in_arr = np.arange(n_els, dtype=dtt).reshape(shape) is_bio = hasattr(opener, 'getvalue') @@ -308,10 +308,10 @@ def test_a2f_intercept_scale(): str_io = BytesIO() # intercept data_back = write_return(arr, str_io, np.float64, 0, 1.0) - assert_array_equal(data_back, arr-1) + assert_array_equal(data_back, arr - 1) # scaling data_back = write_return(arr, str_io, np.float64, 0, 1.0, 2.0) - assert_array_equal(data_back, (arr-1) / 2.0) + assert_array_equal(data_back, (arr - 1) / 2.0) def test_a2f_upscale(): @@ -326,7 +326,7 @@ def test_a2f_upscale(): str_io = BytesIO() # We need to provide mn, mx for function to be able to calculate upcasting array_to_file(arr, str_io, np.uint8, intercept=inter, divslope=slope, - mn = info['min'], mx = info['max']) + mn=info['min'], mx=info['max']) raw = array_from_file(arr.shape, np.uint8, str_io) back = apply_read_scaling(raw, slope, inter) top = back - arr @@ -373,7 +373,7 @@ def test_a2f_order(): data_back = write_return(arr, str_io, ndt, order='C') assert_array_equal(data_back, [0.0, 1.0, 2.0]) # but does in the 2D case - arr = np.array([[0.0, 1.0],[2.0, 3.0]]) + arr = np.array([[0.0, 1.0], [2.0, 3.0]]) data_back = write_return(arr, str_io, ndt, order='F') assert_array_equal(data_back, arr) data_back = write_return(arr, str_io, ndt, order='C') @@ -384,15 +384,15 @@ def test_a2f_nan2zero(): ndt = np.dtype(np.float) str_io = BytesIO() # nans set to 0 for integer output case, not float - arr = np.array([[np.nan, 0],[0, np.nan]]) - data_back = write_return(arr, str_io, ndt) # float, thus no effect + arr = np.array([[np.nan, 0], [0, np.nan]]) + data_back = write_return(arr, str_io, ndt) # float, thus no effect assert_array_equal(data_back, arr) # True is the default, but just to show it's possible data_back = write_return(arr, str_io, ndt, nan2zero=True) assert_array_equal(data_back, arr) with np.errstate(invalid='ignore'): data_back = write_return(arr, str_io, np.int64, nan2zero=True) - assert_array_equal(data_back, [[0, 0],[0, 0]]) + assert_array_equal(data_back, [[0, 0], [0, 0]]) # otherwise things get a bit weird; tidied here # How weird? 
Look at arr.astype(np.int64) with np.errstate(invalid='ignore'): @@ -414,10 +414,10 @@ def test_a2f_nan2zero_scaling(): # Array values including zero before scaling but not after bio = BytesIO() for in_dt, out_dt, zero_in, inter in itertools.product( - FLOAT_TYPES, - IUINT_TYPES, - (True, False), - (0, -100)): + FLOAT_TYPES, + IUINT_TYPES, + (True, False), + (0, -100)): in_info = np.finfo(in_dt) out_info = np.iinfo(out_dt) mx = min(in_info.max, out_info.max * 2., 2**32) + inter @@ -528,10 +528,10 @@ def test_a2f_scaled_unscaled(): # without scaling fobj = BytesIO() for in_dtype, out_dtype, intercept, divslope in itertools.product( - NUMERIC_TYPES, - NUMERIC_TYPES, - (0, 0.5, -1, 1), - (1, 0.5, 2)): + NUMERIC_TYPES, + NUMERIC_TYPES, + (0, 0.5, -1, 1), + (1, 0.5, 2)): mn_in, mx_in = _dt_min_max(in_dtype) nan_val = np.nan if in_dtype in CFLOAT_TYPES else 10 arr = np.array([mn_in, -1, 0, 1, mx_in, nan_val], dtype=in_dtype) @@ -557,17 +557,17 @@ def test_a2f_scaled_unscaled(): intercept=intercept) exp_back = arr.copy() if (in_dtype in IUINT_TYPES and - out_dtype in IUINT_TYPES and - (intercept, divslope) == (0, 1)): + out_dtype in IUINT_TYPES and + (intercept, divslope) == (0, 1)): # Direct iu to iu casting. # Need to clip if ranges not the same. # Use smaller of input, output range to avoid np.clip upcasting # the array because of large clip limits. if (mn_in, mx_in) != (mn_out, mx_out): exp_back = np.clip(exp_back, - max(mn_in, mn_out), - min(mx_in, mx_out)) - else: # Need to deal with nans, casting to float, clipping + max(mn_in, mn_out), + min(mx_in, mx_out)) + else: # Need to deal with nans, casting to float, clipping if in_dtype in CFLOAT_TYPES and out_dtype in IUINT_TYPES: exp_back[np.isnan(exp_back)] = 0 if in_dtype not in COMPLEX_TYPES: @@ -577,7 +577,7 @@ def test_a2f_scaled_unscaled(): if divslope != 1: exp_back /= divslope if (exp_back.dtype.type in CFLOAT_TYPES and - out_dtype in IUINT_TYPES): + out_dtype in IUINT_TYPES): exp_back = np.round(exp_back).astype(float) exp_back = np.clip(exp_back, *shared_range(float, out_dtype)) exp_back = exp_back.astype(out_dtype) @@ -601,12 +601,12 @@ def test_a2f_bad_scaling(): 'uint', 'float', 'complex']], - []) + []) for in_type, out_type, slope, inter in itertools.product( - NUMERICAL_TYPES, - NUMERICAL_TYPES, - (None, 1, 0, np.nan, -np.inf, np.inf), - (0, np.nan, -np.inf, np.inf)): + NUMERICAL_TYPES, + NUMERICAL_TYPES, + (None, 1, 0, np.nan, -np.inf, np.inf), + (0, np.nan, -np.inf, np.inf)): arr = np.ones((2,), dtype=in_type) fobj = BytesIO() if (slope, inter) == (1, 0): @@ -736,7 +736,7 @@ def test_apply_scaling(): assert_equal((i16_arr * big).dtype, np.float32) # An equivalent case is a little hard to find for the intercept nmant_32 = type_info(np.float32)['nmant'] - big_delta = np.float32(2**(floor_log2(big)-nmant_32)) + big_delta = np.float32(2**(floor_log2(big) - nmant_32)) assert_equal((i16_arr * big_delta + big).dtype, np.float32) # Upcasting does occur with this routine assert_equal(apply_read_scaling(i16_arr, big).dtype, np.float64) @@ -797,15 +797,15 @@ def wt(*args, **kwargs): assert_equal(wt(in_type, 1.0, 0.0), in_ts) in_val = d1(in_type(0)) for slope_type in NUMERIC_TYPES: - sl_val = slope_type(1) # no scaling, regardless of type + sl_val = slope_type(1) # no scaling, regardless of type assert_equal(wt(in_type, sl_val, 0.0), in_ts) - sl_val = slope_type(2) # actual scaling + sl_val = slope_type(2) # actual scaling out_val = in_val / d1(sl_val) assert_equal(wt(in_type, sl_val), out_val.dtype.str) for inter_type in NUMERIC_TYPES: - 
i_val = inter_type(0) # no scaling, regardless of type + i_val = inter_type(0) # no scaling, regardless of type assert_equal(wt(in_type, 1, i_val), in_ts) - i_val = inter_type(1) # actual scaling + i_val = inter_type(1) # actual scaling out_val = in_val - d1(i_val) assert_equal(wt(in_type, 1, i_val), out_val.dtype.str) # Combine scaling and intercept @@ -859,8 +859,8 @@ def test_best_write_scale_ftype(): # Information on this float L_info = type_info(lower_t) t_max = L_info['max'] - nmant = L_info['nmant'] # number of significand digits - big_delta = lower_t(2**(floor_log2(t_max) - nmant)) # delta below max + nmant = L_info['nmant'] # number of significand digits + big_delta = lower_t(2**(floor_log2(t_max) - nmant)) # delta below max # Even large values that don't overflow don't change output arr = np.array([0, t_max], dtype=lower_t) assert_equal(best_write_scale_ftype(arr, 1, 0), lower_t) @@ -869,8 +869,8 @@ def test_best_write_scale_ftype(): # Scaling < 1 increases values, so upcast may be needed (and is here) assert_equal(best_write_scale_ftype(arr, lower_t(0.99), 0), higher_t) # Large minus offset on large array can cause upcast - assert_equal(best_write_scale_ftype(arr, 1, -big_delta/2.01), lower_t) - assert_equal(best_write_scale_ftype(arr, 1, -big_delta/2.0), higher_t) + assert_equal(best_write_scale_ftype(arr, 1, -big_delta / 2.01), lower_t) + assert_equal(best_write_scale_ftype(arr, 1, -big_delta / 2.0), higher_t) # With infs already in input, default type returns arr[0] = np.inf assert_equal(best_write_scale_ftype(arr, lower_t(0.5), 0), lower_t) @@ -903,15 +903,15 @@ def test_can_cast(): def test_write_zeros(): bio = BytesIO() write_zeros(bio, 10000) - assert_equal(bio.getvalue(), b'\x00'*10000) + assert_equal(bio.getvalue(), b'\x00' * 10000) bio.seek(0) bio.truncate(0) write_zeros(bio, 10000, 256) - assert_equal(bio.getvalue(), b'\x00'*10000) + assert_equal(bio.getvalue(), b'\x00' * 10000) bio.seek(0) bio.truncate(0) write_zeros(bio, 200, 256) - assert_equal(bio.getvalue(), b'\x00'*200) + assert_equal(bio.getvalue(), b'\x00' * 200) def test_seek_tell(): @@ -938,7 +938,7 @@ def test_seek_tell(): # Files other than BZ2Files can seek forward on write, leaving # zeros in their wake. 
BZ2Files can't seek when writing, unless # we enable the write0 flag to seek_tell - if not write0 and in_file == 'test.bz2': # Can't seek write in bz2 + if not write0 and in_file == 'test.bz2': # Can't seek write in bz2 # write the zeros by hand for the read test below fobj.write(b'\x00' * diff) else: @@ -985,7 +985,9 @@ def test_seek_tell_logic(): bio = BytesIO() seek_tell(bio, 10) assert_equal(bio.tell(), 10) + class BabyBio(BytesIO): + def seek(self, *args): raise IOError() bio = BabyBio() @@ -994,7 +996,7 @@ def seek(self, *args): # Put fileobj in correct position by writing ZEROB = b'\x00' bio.write(ZEROB * 10) - seek_tell(bio, 10) # already there, nothing to do + seek_tell(bio, 10) # already there, nothing to do assert_equal(bio.tell(), 10) assert_equal(bio.getvalue(), ZEROB * 10) # Try write zeros to get to new position @@ -1085,27 +1087,27 @@ def test_shape_zoom_affine(): shape = (3, 5, 7) zooms = (3, 2, 1) res = shape_zoom_affine(shape, zooms) - exp = np.array([[-3., 0., 0., 3.], - [ 0., 2., 0., -4.], - [ 0., 0., 1., -3.], - [ 0., 0., 0., 1.]]) + exp = np.array([[-3., 0., 0., 3.], + [0., 2., 0., -4.], + [0., 0., 1., -3.], + [0., 0., 0., 1.]]) assert_array_almost_equal(res, exp) res = shape_zoom_affine((3, 5), (3, 2)) - exp = np.array([[-3., 0., 0., 3.], - [ 0., 2., 0., -4.], - [ 0., 0., 1., -0.], - [ 0., 0., 0., 1.]]) + exp = np.array([[-3., 0., 0., 3.], + [0., 2., 0., -4.], + [0., 0., 1., -0.], + [0., 0., 0., 1.]]) assert_array_almost_equal(res, exp) res = shape_zoom_affine(shape, zooms, False) - exp = np.array([[ 3., 0., 0., -3.], - [ 0., 2., 0., -4.], - [ 0., 0., 1., -3.], - [ 0., 0., 0., 1.]]) + exp = np.array([[3., 0., 0., -3.], + [0., 2., 0., -4.], + [0., 0., 1., -3.], + [0., 0., 0., 1.]]) assert_array_almost_equal(res, exp) def test_rec2dict(): - r = np.zeros((), dtype = [('x', 'i4'), ('s', 'S10')]) + r = np.zeros((), dtype=[('x', 'i4'), ('s', 'S10')]) d = rec2dict(r) assert_equal(d, {'x': 0, 's': b''}) @@ -1160,12 +1162,12 @@ def assert_rt(data, shape, out_dtype, order='F', - in_cast = None, - pre_clips = None, - inter = 0., - slope = 1., - post_clips = None, - nan_fill = None): + in_cast=None, + pre_clips=None, + inter=0., + slope=1., + post_clips=None, + nan_fill=None): sio = BytesIO() to_write = data.reshape(shape) # to check that we didn't modify in-place @@ -1186,45 +1188,48 @@ def assert_rt(data, # check shape writing for shape, order in itp( - ((24,), (24, 1), (24, 1, 1), (1, 24), (1, 1, 24), (2, 3, 4), - (6, 1, 4), (1, 6, 4), (6, 4, 1)), - 'FC'): + ((24,), (24, 1), (24, 1, 1), (1, 24), (1, 1, 24), (2, 3, 4), + (6, 1, 4), (1, 6, 4), (6, 4, 1)), + 'FC'): assert_rt(np.arange(24), shape, np.int16, order=order) # check defense against modifying data in-place for in_cast, pre_clips, inter, slope, post_clips, nan_fill in itp( - (None, np.float32), - (None, (-1, 25)), - (0., 1.), - (1., 0.5), - (None, (-2, 49)), - (None, 1)): + (None, np.float32), + (None, (-1, 25)), + (0., 1.), + (1., 0.5), + (None, (-2, 49)), + (None, 1)): data = np.arange(24).astype(np.float32) assert_rt(data, shape, np.int16, - in_cast = in_cast, - pre_clips = pre_clips, - inter = inter, - slope = slope, - post_clips = post_clips, - nan_fill = nan_fill) + in_cast=in_cast, + pre_clips=pre_clips, + inter=inter, + slope=slope, + post_clips=post_clips, + nan_fill=nan_fill) # Check defense against in-place modification with nans present if not nan_fill is None: data[1] = np.nan assert_rt(data, shape, np.int16, - in_cast = in_cast, - pre_clips = pre_clips, - inter = inter, - slope = slope, - post_clips = 
post_clips, - nan_fill = nan_fill) + in_cast=in_cast, + pre_clips=pre_clips, + inter=inter, + slope=slope, + post_clips=post_clips, + nan_fill=nan_fill) def test_array_from_file_overflow(): # Test for int overflow in size calculation in array_from_file shape = (1500,) * 6 + class NoStringIO: # Null file-like for forcing error + def seek(self, n_bytes): pass + def read(self, n_bytes): return b'' try: diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index ee3bfafde7..3001dac6df 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -44,6 +44,7 @@ INTEGER_TYPES = np.sctypes['int'] + np.sctypes['uint'] + class _TestWrapStructBase(TestCase): ''' Class implements base tests for binary headers @@ -103,9 +104,9 @@ def test_to_from_fileobj(self): def test_mappingness(self): hdr = self.header_class() assert_raises(ValueError, - hdr.__setitem__, - 'nonexistent key', - 0.1) + hdr.__setitem__, + 'nonexistent key', + 0.1) hdr_dt = hdr.structarr.dtype keys = hdr.keys() assert_equal(keys, list(hdr)) @@ -177,13 +178,13 @@ def log_chk(self, hdr, level): return hdrc, '', () # Non zero level, test above and below threshold # Logging level above threshold, no log - logger.setLevel(level+1) - e_lev = level+1 + logger.setLevel(level + 1) + e_lev = level + 1 hdrc.check_fix(logger=logger, error_level=e_lev) assert_equal(str_io.getvalue(), '') # Logging level below threshold, log appears - logger.setLevel(level+1) - logger.setLevel(level-1) + logger.setLevel(level + 1) + logger.setLevel(level - 1) hdrc = hdr.copy() hdrc.check_fix(logger=logger, error_level=e_lev) assert_true(str_io.getvalue() != '') @@ -245,7 +246,9 @@ def test_as_byteswapped(self): assert_equal(hdr_bs.endianness, swapped_code) assert_not_equal(hdr.binaryblock, hdr_bs.binaryblock) # Note that contents is not rechecked on swap / copy + class DC(self.header_class): + def check_fix(self, *args, **kwargs): raise Exception # Assumes check=True default @@ -356,7 +359,7 @@ def _chk_string(hdr, fix=False): class MyLabeledWrapStruct(LabeledWrapStruct, MyWrapStruct): - _field_recoders = {} # for recoding values for str + _field_recoders = {} # for recoding values for str class TestMyWrapStruct(_TestWrapStructBase): @@ -424,7 +427,7 @@ def test_log_checks(self): # pretent header defined at the top of this file HC = self.header_class hdr = HC() - hdr['an_integer'] = 2 # severity 40 + hdr['an_integer'] = 2 # severity 40 fhdr, message, raiser = self.log_chk(hdr, 40) return assert_equal(fhdr['an_integer'], 1) @@ -433,10 +436,10 @@ def test_log_checks(self): assert_raises(*raiser) # lower case string hdr = HC() - hdr['a_str'] = 'Hello' # severity = 20 + hdr['a_str'] = 'Hello' # severity = 20 fhdr, message, raiser = self.log_chk(hdr, 20) assert_equal(message, 'a_str should be lower case; ' - 'set a_str to lower case') + 'set a_str to lower case') assert_raises(*raiser) def test_logger_error(self): @@ -450,7 +453,7 @@ def test_logger_error(self): logger.setLevel(20) logger.addHandler(logging.StreamHandler(str_io)) # Prepare something that needs fixing - hdr['a_str'] = 'Fullness' # severity 20 + hdr['a_str'] = 'Fullness' # severity 20 log_cache = imageglobals.logger, imageglobals.error_level try: # Check log message appears in new logger From ab0adb402ae64dfd8ad7a990a71a00e3fd13d9ce Mon Sep 17 00:00:00 2001 From: Ben Cipollini Date: Fri, 5 Feb 2016 10:16:28 -0800 Subject: [PATCH 03/11] STY: autoflake on source code. 
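
Notes: the bulk of this change is deleting imports that are never
referenced, plus a few redundant ``pass`` statements after docstrings.
As a rough illustration of the check being automated here (a minimal
standard-library sketch, not autoflake's actual implementation), the
snippet below walks a module's AST and reports imported names that are
never read.  It deliberately ignores names referenced only via strings,
``__all__`` or re-exports, which is exactly why a couple of intentional
"unused" imports below keep a ``# flake8: noqa`` marker instead:

    import ast

    def unused_imports(source):
        # Names bound by ``import x``, ``import x as y``, ``from m import x``.
        tree = ast.parse(source)
        imported = set()
        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                for alias in node.names:
                    imported.add(alias.asname or alias.name.split('.')[0])
            elif isinstance(node, ast.ImportFrom):
                for alias in node.names:
                    imported.add(alias.asname or alias.name)
        # Names actually read somewhere in the module body.
        used = set(node.id for node in ast.walk(tree)
                   if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Load))
        return sorted(imported - used)

    print(unused_imports('from io import BytesIO\nimport os\nprint(os.getcwd())'))
    # -> ['BytesIO']
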
--- nibabel/benchmarks/bench_array_to_file.py | 2 -- nibabel/benchmarks/bench_finite_range.py | 2 -- nibabel/parrec.py | 1 - nibabel/spatialimages.py | 2 -- nibabel/xmlutils.py | 5 ++--- setup_egg.py | 5 ++--- 6 files changed, 4 insertions(+), 13 deletions(-) diff --git a/nibabel/benchmarks/bench_array_to_file.py b/nibabel/benchmarks/bench_array_to_file.py index a7bbf2e7bc..e627485bb9 100644 --- a/nibabel/benchmarks/bench_array_to_file.py +++ b/nibabel/benchmarks/bench_array_to_file.py @@ -19,8 +19,6 @@ import numpy as np -from ..externals.six import BytesIO -from ..volumeutils import array_to_file from .butils import print_git_title diff --git a/nibabel/benchmarks/bench_finite_range.py b/nibabel/benchmarks/bench_finite_range.py index 886fec2f5e..1d442ed379 100644 --- a/nibabel/benchmarks/bench_finite_range.py +++ b/nibabel/benchmarks/bench_finite_range.py @@ -19,8 +19,6 @@ import numpy as np -from ..externals.six import BytesIO -from ..volumeutils import finite_range from .butils import print_git_title diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 7f73bfc580..c99cf1bbb4 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -243,7 +243,6 @@ class PARRECError(Exception): To be raised whenever PAR/REC is not happy, or we are not happy with PAR/REC. """ - pass # Value after colon may be absent diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index c684b69e38..088b6aa0e9 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -144,12 +144,10 @@ class HeaderDataError(Exception): ''' Class to indicate error in getting or setting header data ''' - pass class HeaderTypeError(Exception): ''' Class to indicate error in parameters into header functions ''' - pass class SpatialHeader(FileBasedHeader): diff --git a/nibabel/xmlutils.py b/nibabel/xmlutils.py index 11c41c230e..40e2162907 100644 --- a/nibabel/xmlutils.py +++ b/nibabel/xmlutils.py @@ -11,10 +11,10 @@ """ from io import BytesIO -from xml.etree.ElementTree import Element, SubElement, tostring +from xml.etree.ElementTree import Element, SubElement, tostring # flake8: noqa aliasing from xml.parsers.expat import ParserCreate -from .filebasedimages import FileBasedHeader, FileBasedImage +from .filebasedimages import FileBasedHeader class XmlSerializable(object): @@ -32,7 +32,6 @@ def to_xml(self, enc='utf-8'): class XmlBasedHeader(FileBasedHeader, XmlSerializable): """ Basic wrapper around FileBasedHeader and XmlSerializable.""" - pass class XmlParser(object): diff --git a/setup_egg.py b/setup_egg.py index 48aafbc463..b67a2d9405 100644 --- a/setup_egg.py +++ b/setup_egg.py @@ -3,11 +3,10 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Wrapper to run setup.py using setuptools.""" -import setuptools +import setuptools # flake8: noqa ; needed to monkeypatch dist_utils -################################################################################ +############################################################################### # Call the setup.py script, injecting the setuptools-specific arguments. if __name__ == '__main__': exec(open('setup.py', 'rt').read(), dict(__name__='__main__')) - From debabec27a44f794cdd8ecae28e9203649a52a54 Mon Sep 17 00:00:00 2001 From: Ben Cipollini Date: Fri, 5 Feb 2016 09:21:37 -0800 Subject: [PATCH 04/11] STY: autoflake on test files. 
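
Notes: in the test files most of the dead names are bindings to calls
that are kept purely for their side effect (the warning or exception
under test), so the fix is to drop the assignment but keep the call,
e.g. ``img = read(DATA_FILE1)`` becoming ``read(DATA_FILE1)`` below.
A self-contained sketch of that pattern (the names here are made up
for illustration):

    import warnings

    def load_thing():
        # Stand-in for a loader whose *warning* is the thing under test.
        warnings.warn('deprecated loader', DeprecationWarning)
        return object()

    def test_load_warns():
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always', DeprecationWarning)
            # Before: ``thing = load_thing()`` -- flake8 flags the unused
            # binding (F841).  After autoflake, only the call remains:
            load_thing()
        assert len(w) == 1

    test_load_warns()
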
--- nibabel/freesurfer/mghformat.py | 2 -- nibabel/freesurfer/tests/test_io.py | 2 +- nibabel/freesurfer/tests/test_mghformat.py | 3 +-- nibabel/gifti/tests/test_gifti.py | 4 ++-- nibabel/gifti/tests/test_giftiio.py | 3 +-- nibabel/gifti/tests/test_parse_gifti_fast.py | 4 ++-- nibabel/nicom/tests/test_csareader.py | 3 +-- nibabel/nicom/tests/test_structreader.py | 3 --- nibabel/nicom/tests/test_utils.py | 1 - nibabel/tests/test_api_validators.py | 1 - nibabel/tests/test_arraywriters.py | 6 +++--- nibabel/tests/test_checkwarns.py | 2 +- nibabel/tests/test_data.py | 6 +++--- nibabel/tests/test_ecat.py | 2 +- nibabel/tests/test_endiancodes.py | 4 ++-- nibabel/tests/test_environment.py | 4 +--- nibabel/tests/test_euler.py | 3 ++- nibabel/tests/test_filehandles.py | 1 - nibabel/tests/test_fileholders.py | 8 ++++---- nibabel/tests/test_files_interface.py | 2 +- nibabel/tests/test_fileslice.py | 4 +++- nibabel/tests/test_fileutils.py | 1 - nibabel/tests/test_keywordonly.py | 3 ++- nibabel/tests/test_minc1.py | 2 +- nibabel/tests/test_minc2.py | 1 - nibabel/tests/test_mriutils.py | 1 - nibabel/tests/test_nifti1.py | 1 - nibabel/tests/test_orientations.py | 5 ++--- nibabel/tests/test_parrec.py | 2 +- nibabel/tests/test_parrec_data.py | 7 ++++--- nibabel/tests/test_round_trip.py | 4 ++-- nibabel/tests/test_scripts.py | 2 +- nibabel/tests/test_spm2analyze.py | 2 +- nibabel/tests/test_testing.py | 3 ++- nibabel/tests/test_tripwire.py | 1 - nibabel/tests/test_wrapstruct.py | 3 +-- 36 files changed, 46 insertions(+), 60 deletions(-) diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 83c6af0ef8..743afc90c7 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -68,7 +68,6 @@ class MGHError(Exception): To be raised whenever MGH is not happy, or we are not happy with MGH. """ - pass class MGHHeader(object): @@ -225,7 +224,6 @@ def __ne__(self, other): def check_fix(self): ''' Pass. maybe for now''' - pass def get_affine(self): ''' Get the affine transform from the header information. 
diff --git a/nibabel/freesurfer/tests/test_io.py b/nibabel/freesurfer/tests/test_io.py index d62d4b6476..b596f98fd7 100644 --- a/nibabel/freesurfer/tests/test_io.py +++ b/nibabel/freesurfer/tests/test_io.py @@ -70,7 +70,7 @@ def test_geometry(): coords2, faces2 = read_geometry(surf_path) with open(surf_path, 'rb') as fobj: - magic = np.fromfile(fobj, ">u1", 3) + np.fromfile(fobj, ">u1", 3) read_create_stamp = fobj.readline().decode().rstrip('\n') assert_equal(create_stamp, read_create_stamp) diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index e456d52af3..9683148e5f 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -10,7 +10,6 @@ import os import io -import gzip import numpy as np @@ -134,7 +133,7 @@ def bad_dtype_mgh(): v = np.ones((7, 13, 3, 22)).astype(np.uint16) # form a MGHImage object using data # and the default affine matrix (Note the "None") - img = MGHImage(v, None) + MGHImage(v, None) def test_bad_dtype_mgh(): diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 2cab52d167..3ae6cb44aa 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -9,7 +9,7 @@ from nibabel.gifti import (GiftiImage, GiftiDataArray, GiftiLabel, GiftiLabelTable, GiftiMetaData) from nibabel.gifti.gifti import data_tag -from nibabel.nifti1 import data_type_codes, intent_codes +from nibabel.nifti1 import data_type_codes from numpy.testing import (assert_array_almost_equal, assert_array_equal) @@ -179,7 +179,7 @@ def assign_metadata(val): def test_data_tag_deprecated(): - img = GiftiImage() + GiftiImage() with clear_and_catch_warnings() as w: warnings.filterwarnings('once', category=DeprecationWarning) data_tag(np.array([]), 'ASCII', '%i', 1) diff --git a/nibabel/gifti/tests/test_giftiio.py b/nibabel/gifti/tests/test_giftiio.py index 90a87a2d09..5dd8b85b2e 100644 --- a/nibabel/gifti/tests/test_giftiio.py +++ b/nibabel/gifti/tests/test_giftiio.py @@ -23,7 +23,6 @@ class TestGiftiIO(object): def setUp(self): with clear_and_catch_warnings() as w: warnings.simplefilter('always', DeprecationWarning) - import nibabel.gifti.giftiio assert_equal(len(w), 1) @@ -32,5 +31,5 @@ def test_read_deprecated(): warnings.simplefilter('always', DeprecationWarning) from nibabel.gifti.giftiio import read - img = read(DATA_FILE1) + read(DATA_FILE1) assert_equal(len(w), 1) diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index 163e050734..d3a425daa9 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -21,7 +21,7 @@ from nibabel.nifti1 import xform_codes from nibabel.tmpdirs import InTemporaryDirectory -from numpy.testing import assert_array_equal, assert_array_almost_equal +from numpy.testing import assert_array_almost_equal from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) @@ -113,7 +113,7 @@ def test_read_ordering(): def test_load_metadata(): for i, dat in enumerate(datafiles): img = load(dat) - me = img.meta + img.meta assert_equal(numDA[i], img.numDA) assert_equal(img.version, '1.0') diff --git a/nibabel/nicom/tests/test_csareader.py b/nibabel/nicom/tests/test_csareader.py index 33feb7eaee..509d786914 100644 --- a/nibabel/nicom/tests/test_csareader.py +++ b/nibabel/nicom/tests/test_csareader.py @@ -10,7 +10,6 @@ from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) -from 
numpy.testing import assert_array_equal, assert_array_almost_equal from .test_dicomwrappers import (have_dicom, dicom_test, IO_DATA_PATH, DATA, DATA_FILE) @@ -109,7 +108,7 @@ def test_csa_params(): assert_equal(b_matrix.shape, (3, 3)) # check (by absence of error) that the B matrix is positive # semi-definite. - q = dwp.B2q(b_matrix) + dwp.B2q(b_matrix) b_value = csa.get_b_value(csa_info) assert_equal(b_value, 1000) g_vector = csa.get_g_vector(csa_info) diff --git a/nibabel/nicom/tests/test_structreader.py b/nibabel/nicom/tests/test_structreader.py index 3bca19b9fe..05461d18a0 100644 --- a/nibabel/nicom/tests/test_structreader.py +++ b/nibabel/nicom/tests/test_structreader.py @@ -7,7 +7,6 @@ from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) -from numpy.testing import assert_array_equal, assert_array_almost_equal def test_unpacker(): @@ -17,12 +16,10 @@ def test_unpacker(): if sys.byteorder == 'little': native_int = le_int swapped_int = be_int - native_code = '<' swapped_code = '>' else: native_int = be_int swapped_int = le_int - native_code = '>' swapped_code = '<' up_str = Unpacker(s, endian='<') assert_equal(up_str.read(4), b'1234') diff --git a/nibabel/nicom/tests/test_utils.py b/nibabel/nicom/tests/test_utils.py index 69a77617ef..57ea60754f 100644 --- a/nibabel/nicom/tests/test_utils.py +++ b/nibabel/nicom/tests/test_utils.py @@ -2,7 +2,6 @@ """ import re -import numpy as np from numpy.testing import (assert_almost_equal, assert_array_equal) diff --git a/nibabel/tests/test_api_validators.py b/nibabel/tests/test_api_validators.py index 642498522e..f2dcaa3623 100644 --- a/nibabel/tests/test_api_validators.py +++ b/nibabel/tests/test_api_validators.py @@ -51,7 +51,6 @@ class ValidateAPI(with_metaclass(validator2test)): See :class:`TextValidateSomething` for an example """ - pass class TestValidateSomething(ValidateAPI): diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py index 7ab36268b5..2f06f3a682 100644 --- a/nibabel/tests/test_arraywriters.py +++ b/nibabel/tests/test_arraywriters.py @@ -331,17 +331,17 @@ def test_slope_inter_castable(): data = np.array(arr, dtype=in_dtt) # With scaling but no intercept if slope_only: - aw = SlopeArrayWriter(data, out_dtt) + SlopeArrayWriter(data, out_dtt) else: assert_raises(WriterError, SlopeArrayWriter, data, out_dtt) # With scaling and intercept if slope_inter: - aw = SlopeInterArrayWriter(data, out_dtt) + SlopeInterArrayWriter(data, out_dtt) else: assert_raises(WriterError, SlopeInterArrayWriter, data, out_dtt) # With neither if neither: - aw = ArrayWriter(data, out_dtt) + ArrayWriter(data, out_dtt) else: assert_raises(WriterError, ArrayWriter, data, out_dtt) diff --git a/nibabel/tests/test_checkwarns.py b/nibabel/tests/test_checkwarns.py index fb06507c1d..11c7422326 100644 --- a/nibabel/tests/test_checkwarns.py +++ b/nibabel/tests/test_checkwarns.py @@ -2,7 +2,7 @@ """ from __future__ import division, print_function, absolute_import -from nose.tools import assert_true, assert_equal, assert_raises +from nose.tools import assert_equal from ..testing import clear_and_catch_warnings, suppress_warnings diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py index 365351f4ed..1091ca18c3 100644 --- a/nibabel/tests/test_data.py +++ b/nibabel/tests/test_data.py @@ -238,7 +238,7 @@ def test_make_datasource(): @raises(DataError) def test_bomber(): b = Bomber('bomber example', 'a message') - res = b.any_attribute + b.any_attribute def test_bomber_inspect(): @@ -265,11 +265,11 @@ def 
test_datasource_or_bomber(): fobj.write('[DEFAULT]\n') fobj.write('version = 0.2\n') ds = datasource_or_bomber(pkg_def) - fn = ds.get_filename('some_file.txt') + ds.get_filename('some_file.txt') # check that versioning works pkg_def['min version'] = '0.2' ds = datasource_or_bomber(pkg_def) # OK - fn = ds.get_filename('some_file.txt') + ds.get_filename('some_file.txt') pkg_def['min version'] = '0.3' ds = datasource_or_bomber(pkg_def) # not OK yield (assert_raises, diff --git a/nibabel/tests/test_ecat.py b/nibabel/tests/test_ecat.py index 27d81ce77d..00c54e5d5e 100644 --- a/nibabel/tests/test_ecat.py +++ b/nibabel/tests/test_ecat.py @@ -172,7 +172,7 @@ def test_subheader(self): assert_equal(self.subhdr._get_frame_offset(), 1536) dat = self.subhdr.raw_data_from_fileobj() assert_equal(dat.shape, self.subhdr.get_shape()) - scale_factor = self.subhdr.subheaders[0]['scale_factor'] + self.subhdr.subheaders[0]['scale_factor'] assert_equal(self.subhdr.subheaders[0]['scale_factor'].item(), 1.0) ecat_calib_factor = self.hdr['ecat_calibration_factor'] assert_equal(ecat_calib_factor, 25007614.0) diff --git a/nibabel/tests/test_endiancodes.py b/nibabel/tests/test_endiancodes.py index 0e821d1f95..805de0d572 100644 --- a/nibabel/tests/test_endiancodes.py +++ b/nibabel/tests/test_endiancodes.py @@ -10,9 +10,9 @@ import sys -import numpy as np -from nose.tools import assert_raises, assert_true, assert_equal +from nose.tools import assert_equal +from nose.tools import assert_true from ..volumeutils import (endian_codes, native_code, swapped_code) diff --git a/nibabel/tests/test_environment.py b/nibabel/tests/test_environment.py index 6e85b58187..7b02ea866f 100644 --- a/nibabel/tests/test_environment.py +++ b/nibabel/tests/test_environment.py @@ -4,16 +4,14 @@ import os from os import environ as env from os.path import join as pjoin, abspath -import sys -import numpy as np from .. import environment as nibe from numpy.testing import (assert_array_almost_equal, assert_array_equal) -from nose.tools import assert_true, assert_equal, assert_raises +from nose.tools import assert_equal from nose import with_setup diff --git a/nibabel/tests/test_euler.py b/nibabel/tests/test_euler.py index 269c14d475..0d7027222f 100644 --- a/nibabel/tests/test_euler.py +++ b/nibabel/tests/test_euler.py @@ -15,7 +15,8 @@ from .. import eulerangles as nea from .. 
import quaternions as nq -from nose.tools import assert_true, assert_false, assert_equal +from nose.tools import assert_false +from nose.tools import assert_true from numpy.testing import assert_array_equal, assert_array_almost_equal diff --git a/nibabel/tests/test_filehandles.py b/nibabel/tests/test_filehandles.py index 9c8befeb8d..365a418890 100644 --- a/nibabel/tests/test_filehandles.py +++ b/nibabel/tests/test_filehandles.py @@ -24,7 +24,6 @@ from numpy.testing import (assert_array_almost_equal, assert_array_equal) -from nose.tools import assert_true, assert_equal, assert_raises def test_multiload(): diff --git a/nibabel/tests/test_fileholders.py b/nibabel/tests/test_fileholders.py index 2629be2519..0069eef36a 100644 --- a/nibabel/tests/test_fileholders.py +++ b/nibabel/tests/test_fileholders.py @@ -3,15 +3,15 @@ from ..externals.six import BytesIO -import numpy as np -from ..fileholders import FileHolder, FileHolderError, copy_file_map -from ..tmpdirs import InTemporaryDirectory +from ..fileholders import FileHolder from numpy.testing import (assert_array_almost_equal, assert_array_equal) -from nose.tools import assert_true, assert_false, assert_equal, assert_raises +from nose.tools import assert_equal +from nose.tools import assert_false +from nose.tools import assert_true def test_init(): diff --git a/nibabel/tests/test_files_interface.py b/nibabel/tests/test_files_interface.py index 8470252b6e..788be6a31a 100644 --- a/nibabel/tests/test_files_interface.py +++ b/nibabel/tests/test_files_interface.py @@ -19,7 +19,7 @@ from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) -from numpy.testing import assert_array_equal, assert_array_almost_equal +from numpy.testing import assert_array_equal def test_files_spatialimages(): diff --git a/nibabel/tests/test_fileslice.py b/nibabel/tests/test_fileslice.py index f4af7b3d90..55827c56d2 100644 --- a/nibabel/tests/test_fileslice.py +++ b/nibabel/tests/test_fileslice.py @@ -17,7 +17,9 @@ calc_slicedefs, _simple_fileslice, slice2outax, strided_scalar) -from nose.tools import assert_true, assert_false, assert_equal, assert_raises +from nose.tools import assert_equal +from nose.tools import assert_false +from nose.tools import assert_raises from numpy.testing import assert_array_equal diff --git a/nibabel/tests/test_fileutils.py b/nibabel/tests/test_fileutils.py index 5efa0401a3..63ecc8ee34 100644 --- a/nibabel/tests/test_fileutils.py +++ b/nibabel/tests/test_fileutils.py @@ -9,7 +9,6 @@ """ Testing fileutils module """ -import numpy as np from ..fileutils import read_zt_byte_strings diff --git a/nibabel/tests/test_keywordonly.py b/nibabel/tests/test_keywordonly.py index ba8bce0d94..0ef63d9b13 100644 --- a/nibabel/tests/test_keywordonly.py +++ b/nibabel/tests/test_keywordonly.py @@ -2,7 +2,8 @@ from ..keywordonly import kw_only_func, kw_only_meth -from nose.tools import assert_true, assert_false, assert_equal, assert_raises +from nose.tools import assert_equal +from nose.tools import assert_raises def test_kw_only_func(): diff --git a/nibabel/tests/test_minc1.py b/nibabel/tests/test_minc1.py index 0e8d4e8d47..0bbcd4b510 100644 --- a/nibabel/tests/test_minc1.py +++ b/nibabel/tests/test_minc1.py @@ -25,7 +25,7 @@ from ..minc1 import Minc1File, Minc1Image, MincHeader from nose.tools import (assert_true, assert_equal, assert_false, assert_raises) -from numpy.testing import assert_array_equal, assert_array_almost_equal +from numpy.testing import assert_array_equal from ..tmpdirs import InTemporaryDirectory from ..testing import 
data_path diff --git a/nibabel/tests/test_minc2.py b/nibabel/tests/test_minc2.py index 3c955a6947..c102a0a581 100644 --- a/nibabel/tests/test_minc2.py +++ b/nibabel/tests/test_minc2.py @@ -20,7 +20,6 @@ from ..minc2 import Minc2File, Minc2Image from nose.tools import (assert_true, assert_equal, assert_false, assert_raises) -from numpy.testing import assert_array_equal, assert_array_almost_equal from ..testing import data_path diff --git a/nibabel/tests/test_mriutils.py b/nibabel/tests/test_mriutils.py index f65f6b2d26..6978d9c253 100644 --- a/nibabel/tests/test_mriutils.py +++ b/nibabel/tests/test_mriutils.py @@ -10,7 +10,6 @@ """ from __future__ import division -import numpy as np from numpy.testing import (assert_almost_equal, assert_array_equal) diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index a505e843b0..7248d12ea7 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -22,7 +22,6 @@ Nifti1Pair, Nifti1Extension, Nifti1Extensions, data_type_codes, extension_codes, slice_order_codes) -from nibabel.openers import ImageOpener from nibabel.spatialimages import HeaderDataError from nibabel.tmpdirs import InTemporaryDirectory from ..freesurfer import load as mghload diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 096684befb..3e36c3289f 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -12,13 +12,13 @@ from nose.tools import assert_true, assert_equal, assert_raises -from numpy.testing import assert_array_equal, assert_array_almost_equal +from numpy.testing import assert_array_equal from ..orientations import (io_orientation, ornt_transform, inv_ornt_aff, flip_axis, apply_orientation, OrientationError, ornt2axcodes, axcodes2ornt, aff2axcodes) -from ..affines import from_matvec, to_matvec +from ..affines import from_matvec IN_ARRS = [np.eye(4), @@ -296,7 +296,6 @@ def test_axcodes2ornt(): def test_aff2axcodes(): - labels = (('left', 'right'), ('back', 'front'), ('down', 'up')) assert_equal(aff2axcodes(np.eye(4)), tuple('RAS')) aff = [[0, 1, 0, 10], [-1, 0, 0, 20], [0, 0, 1, 30], [0, 0, 0, 1]] assert_equal(aff2axcodes(aff, (('L', 'R'), ('B', 'F'), ('D', 'U'))), diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index e03afafb36..604cefc583 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -301,7 +301,7 @@ def test_truncated_load(): gen_info, slice_info = parse_PAR_header(fobj) assert_raises(PARRECError, PARRECHeader, gen_info, slice_info) with clear_and_catch_warnings(record=True) as wlist: - hdr = PARRECHeader(gen_info, slice_info, True) + PARRECHeader(gen_info, slice_info, True) assert_equal(len(wlist), 1) diff --git a/nibabel/tests/test_parrec_data.py b/nibabel/tests/test_parrec_data.py index 16de836206..e428f5e974 100644 --- a/nibabel/tests/test_parrec_data.py +++ b/nibabel/tests/test_parrec_data.py @@ -13,7 +13,8 @@ from .nibabel_data import get_nibabel_data, needs_nibabel_data from nose import SkipTest -from nose.tools import assert_true, assert_false, assert_equal +from nose.tools import assert_equal +from nose.tools import assert_true from numpy.testing import assert_almost_equal @@ -59,6 +60,6 @@ def test_fieldmap(): # second is phase. The NIfTI has very odd scaling, being all negative. 
fieldmap_par = pjoin(BALLS, 'PARREC', 'fieldmap.PAR') fieldmap_nii = pjoin(BALLS, 'NIFTI', 'fieldmap.nii.gz') - pimg = load(fieldmap_par) - nimg = top_load(fieldmap_nii) + load(fieldmap_par) + top_load(fieldmap_nii) raise SkipTest('Fieldmap remains puzzling') diff --git a/nibabel/tests/test_round_trip.py b/nibabel/tests/test_round_trip.py index 0b23bf192e..f60c9e9362 100644 --- a/nibabel/tests/test_round_trip.py +++ b/nibabel/tests/test_round_trip.py @@ -13,7 +13,7 @@ from nose.tools import assert_true -from numpy.testing import assert_array_equal, assert_almost_equal +from numpy.testing import assert_array_equal DEBUG = True @@ -192,5 +192,5 @@ def check_arr(test_id, V_in, in_type, out_type, scaling_type): rel_mx_e, slope, inter) # To help debugging failures with --pdb-failure - fail_i = np.nonzero(all_fails) + np.nonzero(all_fails) assert_true(this_test) diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index 1544a53a97..5346292082 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -21,7 +21,7 @@ from nose.tools import (assert_true, assert_false, assert_not_equal, assert_equal) -from numpy.testing import assert_almost_equal, assert_array_equal +from numpy.testing import assert_almost_equal from .scriptrunner import ScriptRunner from .nibabel_data import needs_nibabel_data diff --git a/nibabel/tests/test_spm2analyze.py b/nibabel/tests/test_spm2analyze.py index e3d7d92497..e39e79b96e 100644 --- a/nibabel/tests/test_spm2analyze.py +++ b/nibabel/tests/test_spm2analyze.py @@ -64,4 +64,4 @@ class TestSpm2AnalyzeImage(test_spm99analyze.TestSpm99AnalyzeImage): def test_origin_affine(): # check that origin affine works, only hdr = Spm2AnalyzeHeader() - aff = hdr.get_origin_affine() + hdr.get_origin_affine() diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index 7a4b3d81ce..0add90454d 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -7,7 +7,8 @@ import numpy as np -from nose.tools import assert_true, assert_equal, assert_raises +from nose.tools import assert_equal +from nose.tools import assert_raises from ..testing import (error_warnings, suppress_warnings, clear_and_catch_warnings, assert_allclose_safely, get_fresh_mod) diff --git a/nibabel/tests/test_tripwire.py b/nibabel/tests/test_tripwire.py index 74e47a2331..05d3b1eb3f 100644 --- a/nibabel/tests/test_tripwire.py +++ b/nibabel/tests/test_tripwire.py @@ -3,7 +3,6 @@ from ..tripwire import TripWire, is_tripwire, TripWireError -from nose import SkipTest from nose.tools import (assert_true, assert_false, assert_raises, assert_equal, assert_not_equal) diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index 3001dac6df..a2f1d6c2de 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -24,7 +24,6 @@ _field_recoders -> field_recoders ''' import logging -import warnings import numpy as np from ..externals.six import BytesIO, StringIO @@ -157,7 +156,7 @@ def test_structarr(self): # structarr attribute also read only hdr = self.header_class() # Just check we can get structarr - _ = hdr.structarr + hdr.structarr # That it's read only assert_raises(AttributeError, hdr.__setattr__, 'structarr', 0) From ee23aa23e3e80c9a7baeea04090156829a8852ac Mon Sep 17 00:00:00 2001 From: Ben Cipollini Date: Fri, 5 Feb 2016 09:15:55 -0800 Subject: [PATCH 05/11] update tox --- tox.ini | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tox.ini b/tox.ini index a0002e12b6..ea90c56b79 100644 --- 
From 064cb7ff183e965a1d897898ed61e3f70771886b Mon Sep 17 00:00:00 2001
From: Ben Cipollini
Date: Fri, 5 Feb 2016 09:51:51 -0800
Subject: [PATCH 06/11] STY: flake8 manual fixes

---
 nibabel/affines.py              |  2 +-
 nibabel/casting.py              |  4 ++--
 nibabel/checkwarns.py           |  3 ++-
 nibabel/data.py                 |  2 +-
 nibabel/ecat.py                 | 20 +++++++++++---------
 nibabel/fileslice.py            | 12 ++++++------
 nibabel/freesurfer/mghformat.py |  4 ++--
 nibabel/info.py                 |  5 ++---
 nibabel/minc.py                 |  2 +-
 nibabel/minc1.py                |  2 +-
 nibabel/mriutils.py             |  4 ++--
 nibabel/nicom/dicomreaders.py   |  2 +-
 nibabel/nicom/dicomwrappers.py  |  4 ++--
 nibabel/nifti1.py               |  4 ++--
 nibabel/onetime.py              |  9 ++++-----
 nibabel/openers.py              |  5 +++--
 nibabel/optpkg.py               |  3 ++-
 nibabel/parrec.py               |  4 ++--
 nibabel/quaternions.py          |  3 ++-
 nibabel/spaces.py               |  2 +-
 nibabel/spatialimages.py        |  2 +-
 nibabel/trackvis.py             |  8 ++++----
 nibabel/volumeutils.py          | 22 +++++++++++-----------
 nibabel/wrapstruct.py           |  2 +-
 24 files changed, 67 insertions(+), 63 deletions(-)

diff --git a/nibabel/affines.py b/nibabel/affines.py
index de89612add..5ed07e5e39 100644
--- a/nibabel/affines.py
+++ b/nibabel/affines.py
@@ -166,7 +166,7 @@ def from_matvec(matrix, vector=None):
     t = np.zeros((nin + 1, nout + 1), matrix.dtype)
     t[0:nin, 0:nout] = matrix
     t[nin, nout] = 1.
-    if not vector is None:
+    if vector is not None:
         t[0:nin, nout] = vector
     return t
diff --git a/nibabel/casting.py b/nibabel/casting.py
index 8707f94e9a..a373a91df7 100644
--- a/nibabel/casting.py
+++ b/nibabel/casting.py
@@ -269,7 +269,7 @@ def type_info(np_type):
     # Oh dear, we don't recognize the type information.  Try some known types
     # and then give up. At this stage we're expecting exotic longdouble or
     # their complex equivalent.
-    if not np_type in (np.longdouble, np.longcomplex) or width not in (16, 32):
+    if np_type not in (np.longdouble, np.longcomplex) or width not in (16, 32):
         raise FloatingError('We had not expected type %s' % np_type)
     if (vals == (1, 1, 16) and on_powerpc() and
             _check_maxexp(np.longdouble, 1024)):
@@ -439,7 +439,7 @@ def int_to_float(val, flt_type):
     f : numpy scalar of type `flt_type`
     """
-    if not flt_type is np.longdouble:
+    if flt_type is not np.longdouble:
         return flt_type(val)
     # The following works around a nasty numpy 1.4.1 bug such that:
     # >>> int(np.uint32(2**32-1)
diff --git a/nibabel/checkwarns.py b/nibabel/checkwarns.py
index 52c6c718e9..01ef8fd10c 100644
--- a/nibabel/checkwarns.py
+++ b/nibabel/checkwarns.py
@@ -15,7 +15,8 @@
 from .testing import (error_warnings, suppress_warnings)

-warnings.warn('The checkwarns module is deprecated and will be removed in nibabel v3.0', FutureWarning)
+warnings.warn('The checkwarns module is deprecated and will be removed '
+              'in nibabel v3.0', FutureWarning)

 class ErrorWarnings(error_warnings):
diff --git a/nibabel/data.py b/nibabel/data.py
index c56146ed31..e6e191901b 100644
--- a/nibabel/data.py
+++ b/nibabel/data.py
@@ -298,7 +298,7 @@ def make_datasource(pkg_def, **kwargs):
                 e)
         if 'name' in pkg_def:
             msg += '\n\nYou may need the package "%s"' % pkg_def['name']
-        if not pkg_hint is None:
+        if pkg_hint is not None:
             msg += '\n\n%s' % pkg_hint
         raise DataError(msg)
     return VersionedDatasource(pth)
diff --git a/nibabel/ecat.py b/nibabel/ecat.py
index bd1d7b589f..8dda90eefc 100644
--- a/nibabel/ecat.py
+++ b/nibabel/ecat.py
@@ -305,16 +305,17 @@ def get_data_dtype(self):

     def get_patient_orient(self):
         """ gets orientation of patient based on code stored
-        in header, not always reliable"""
+        in header, not always reliable
+        """
         code = self._structarr['patient_orientation'].item()
-        if not code in self._patient_orient_codes:
+        if code not in self._patient_orient_codes:
             raise KeyError('Ecat Orientation CODE %d not recognized' % code)
         return self._patient_orient_codes[code]

     def get_filetype(self):
         """ Type of ECAT Matrix File from code stored in header"""
         code = self._structarr['file_type'].item()
-        if not code in self._ft_codes:
+        if code not in self._ft_codes:
             raise KeyError('Ecat Filetype CODE %d not recognized' % code)
         return self._ft_codes[code]

@@ -368,7 +369,7 @@ def read_mlist(fileobj, endianness):
     `nused` in CTI code).
     """
     dt = np.dtype(np.int32)
-    if not endianness is native_code:
+    if endianness is not native_code:
         dt = dt.newbyteorder(endianness)
     mlists = []
     mlist_index = 0
@@ -496,7 +497,7 @@ def read_subheaders(fileobj, mlist, endianness):
     """
     subheaders = []
     dt = subhdr_dtype
-    if not endianness is native_code:
+    if endianness is not native_code:
         dt = dt.newbyteorder(endianness)
     for mat_id, sh_blkno, sh_last_blkno, mat_stat in mlist:
         if sh_blkno == 0:
@@ -630,7 +631,7 @@ def raw_data_from_fileobj(self, frame=0, orientation=None):
         .. seealso:: data_from_fileobj
         '''
         dtype = self._get_data_dtype(frame)
-        if not self._header.endianness is native_code:
+        if self._header.endianness is not native_code:
             dtype = dtype.newbyteorder(self._header.endianness)
         shape = self.get_shape(frame)
         offset = self._get_frame_offset(frame)
@@ -700,7 +701,7 @@ def __getitem__(self, sliceobj):
         """
         sliceobj = canonical_slicers(sliceobj, self.shape)
         # Indices into sliceobj referring to image axes
-        ax_inds = [i for i, obj in enumerate(sliceobj) if not obj is None]
+        ax_inds = [i for i, obj in enumerate(sliceobj) if obj is not None]
         assert len(ax_inds) == len(self.shape)
         frame_mapping = get_frame_order(self._subheader._mlist)
         # Analyze index for 4th axis
@@ -786,7 +787,7 @@ def __init__(self, dataobj, affine, header,
         self._subheader = subheader
         self._mlist = mlist
         self._dataobj = dataobj
-        if not affine is None:
+        if affine is not None:
             # Check that affine is array-like 4,4.  Maybe this is too strict at
             # this abstract level, but so far I think all image formats we know
             # do need 4,4.
@@ -863,7 +864,8 @@ def _get_fileholders(file_map):
     @classmethod
     def from_file_map(klass, file_map):
         """class method to create image from mapping
-        specified in file_map"""
+        specified in file_map
+        """
         hdr_file, img_file = klass._get_fileholders(file_map)
         # note header and image are in same file
         hdr_fid = hdr_file.get_prepare_fileobj(mode='rb')
diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py
index 3ba907e019..d69ab6f3b3 100644
--- a/nibabel/fileslice.py
+++ b/nibabel/fileslice.py
@@ -92,7 +92,7 @@ def canonical_slicers(sliceobj, shape, check_inds=True):
             if Ellipsis in remaining:
                 raise ValueError("More than one Ellipsis in slicing "
                                  "expression")
-            real_remaining = [r for r in remaining if not r is None]
+            real_remaining = [r for r in remaining if r is not None]
             n_ellided = n_dim - n_real - len(real_remaining)
             can_slicers.extend((slice(None),) * n_ellided)
             n_real += n_ellided
@@ -144,7 +144,7 @@ def slice2outax(ndim, sliceobj):
         if isinstance(obj, Integral):
             out_ax_inds.append(None)
             continue
-        if not obj is None:
+        if obj is not None:
             out_ax_inds.append(out_ax_no)
         out_ax_no += 1
     return tuple(out_ax_inds)
@@ -210,9 +210,9 @@ def fill_slicer(slicer, in_len):
     start, stop, step = slicer.start, slicer.stop, slicer.step
     if step is None:
         step = 1
-    if not start is None and start < 0:
+    if start is not None and start < 0:
         start = in_len + start
-    if not stop is None and stop < 0:
+    if stop is not None and stop < 0:
         stop = in_len + stop
     if step > 0:
         if start is None:
@@ -424,7 +424,7 @@ def optimize_slicer(slicer, dim_len, all_full, is_slowest, stride,
     elif action == 'contiguous':  # Cannot be int
         # If this is already contiguous, default None behavior handles it
         step = slicer.step
-        if not step in (-1, 1):
+        if step not in (-1, 1):
             if step < 0:
                 slicer = _positive_slice(slicer)
             return (slice(slicer.start, slicer.stop, 1),
@@ -480,7 +480,7 @@ def calc_slicedefs(sliceobj, in_shape, itemsize, offset, order,
        `segments` and reshaping via
        `read_shape`.  Slices are in terms of `read_shape`.  If empty, no new
        slicing to apply
    """
-    if not order in "CF":
+    if order not in "CF":
         raise ValueError("order should be one of 'CF'")
     sliceobj = canonical_slicers(sliceobj, in_shape)
     # order fastest changing first (record reordering)
diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py
index 743afc90c7..efe51c7d5a 100644
--- a/nibabel/freesurfer/mghformat.py
+++ b/nibabel/freesurfer/mghformat.py
@@ -492,7 +492,7 @@ def from_file_map(klass, file_map, mmap=True):
             image data file cannot be memory-mapped, ignore `mmap` value and
             read array from file.
         '''
-        if not mmap in (True, False, 'c', 'r'):
+        if mmap not in (True, False, 'c', 'r'):
             raise ValueError("mmap should be one of {True, False, 'c', 'r'}")
         img_fh = file_map['image']
         mghf = img_fh.get_prepare_fileobj('rb')
@@ -528,7 +528,7 @@ def from_filename(klass, filename, mmap=True):
         -------
         img : MGHImage instance
         '''
-        if not mmap in (True, False, 'c', 'r'):
+        if mmap not in (True, False, 'c', 'r'):
             raise ValueError("mmap should be one of {True, False, 'c', 'r'}")
         file_map = klass.filespec_to_file_map(filename)
         return klass.from_file_map(file_map, mmap=mmap)
diff --git a/nibabel/info.py b/nibabel/info.py
index 6a44560a44..8d2bfb7571 100644
--- a/nibabel/info.py
+++ b/nibabel/info.py
@@ -11,7 +11,7 @@
 _version_minor = 1
 _version_micro = 0
 _version_extra = 'dev'
-#_version_extra = ''
+# _version_extra = ''

 # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
 __version__ = "%s.%s.%s%s" % (_version_major,
@@ -109,8 +109,7 @@

 # Main setup parameters
 NAME = 'nibabel'
-MAINTAINER = "Matthew Brett, Michael Hanke, Eric Larson, " \
-    "Chris Markiewicz"
+MAINTAINER = "Matthew Brett, Michael Hanke, Eric Larson, Chris Markiewicz"
 MAINTAINER_EMAIL = "neuroimaging@python.org"
 DESCRIPTION = description
 LONG_DESCRIPTION = long_description
diff --git a/nibabel/minc.py b/nibabel/minc.py
index 9ebebea180..94e8da57fc 100644
--- a/nibabel/minc.py
+++ b/nibabel/minc.py
@@ -7,4 +7,4 @@
               FutureWarning,
               stacklevel=2)

-from .minc1 import *
+from .minc1 import *  # noqa
diff --git a/nibabel/minc1.py b/nibabel/minc1.py
index 74cc68d40a..25beb7c994 100644
--- a/nibabel/minc1.py
+++ b/nibabel/minc1.py
@@ -199,7 +199,7 @@ def _normalize(self, data, sliceobj=()):
         shape = self.get_data_shape()
         sliceobj = canonical_slicers(sliceobj, shape)
         # Indices into sliceobj referring to image axes
-        ax_inds = [i for i, obj in enumerate(sliceobj) if not obj is None]
+        ax_inds = [i for i, obj in enumerate(sliceobj) if obj is not None]
         assert len(ax_inds) == len(shape)
         # Slice imax, imin using same slicer as for data
         nscales_ax = ax_inds[nscales]
diff --git a/nibabel/mriutils.py b/nibabel/mriutils.py
index d35b9c90a0..9f58a629c5 100644
--- a/nibabel/mriutils.py
+++ b/nibabel/mriutils.py
@@ -48,5 +48,5 @@ def calculate_dwell_time(water_fat_shift, echo_train_length, field_strength):
     if echo_train_length <= 0:
         raise MRIError("Echo train length should be >= 1")
     return ((echo_train_length - 1) * water_fat_shift /
-            (GYROMAGNETIC_RATIO * PROTON_WATER_FAT_SHIFT
-             * field_strength * (echo_train_length + 1)))
+            (GYROMAGNETIC_RATIO * PROTON_WATER_FAT_SHIFT *
+             field_strength * (echo_train_length + 1)))
diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py
index 38a4bdd729..4c06d10db8 100644
--- a/nibabel/nicom/dicomreaders.py
+++ b/nibabel/nicom/dicomreaders.py
@@ -193,7 +193,7 @@ def _third_pass(wrappers):
     out_vol_lists = [vol_list]
     for dw in wrappers[1:]:
         z = dw.slice_indicator
-        if not z in these_zs:
+        if z not in these_zs:
             # same volume
             vol_list.append(dw)
             these_zs.append(z)
diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py
index c4874d2431..e0e06a1c70 100644
--- a/nibabel/nicom/dicomwrappers.py
+++ b/nibabel/nicom/dicomwrappers.py
@@ -278,7 +278,7 @@ def series_signature(self):

     def __getitem__(self, key):
         """ Return values from DICOM object"""
-        if not key in self.dcm_data:
+        if key not in self.dcm_data:
             raise KeyError('"%s" not in self.dcm_data' % key)
         return self.dcm_data.get(key)

@@ -646,7 +646,7 @@ def series_signature(self):
         """ Add ICE dims from CSA header to signature """
         signature = super(SiemensWrapper, self).series_signature
         ice = csar.get_ice_dims(self.csa_header)
-        if not ice is None:
+        if ice is not None:
             ice = ice[:6] + ice[8:9]
             signature['ICE_Dims'] = (ice, lambda x, y: x == y)
         return signature
diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py
index 144f600198..5f85ca7783 100644
--- a/nibabel/nifti1.py
+++ b/nibabel/nifti1.py
@@ -716,7 +716,7 @@ def get_data_shape(self):
         return shape

     def set_data_shape(self, shape):
-        ''' Set shape of data
+        ''' Set shape of data  # noqa

         If ``ndims == len(shape)`` then we set zooms for dimensions higher
         than ``ndims`` to 1.0
@@ -1644,7 +1644,7 @@ def __init__(self, dataobj, affine, header=None,
                                          extra, file_map)
         # Force set of s/q form when header is None unless affine is also None
-        if header is None and not affine is None:
+        if header is None and affine is not None:
             self._affine2header()
     # Copy docstring
     __init__.__doc__ = analyze.AnalyzeImage.__init__.__doc__
diff --git a/nibabel/onetime.py b/nibabel/onetime.py
index f5947e92fd..1f410b9a1e 100644
--- a/nibabel/onetime.py
+++ b/nibabel/onetime.py
@@ -21,9 +21,9 @@
 """
 from __future__ import division, print_function, absolute_import

-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
 # Classes and Functions
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------

 class ResetMixin(object):
@@ -130,7 +130,6 @@ def __init__(self, func):

     def __get__(self, obj, type=None):
         """This will be called on attribute access on the class or instance."""
-
         if obj is None:
             # Being called on the class, return the original function. This
             # way, introspection works on the class.
@@ -173,9 +172,9 @@ def auto_attr(func):
     return OneTimeProperty(func)

-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
 # Deprecated API
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------

 # For backwards compatibility
 setattr_on_read = auto_attr
diff --git a/nibabel/openers.py b/nibabel/openers.py
index fafde811b1..abed97afcf 100644
--- a/nibabel/openers.py
+++ b/nibabel/openers.py
@@ -26,7 +26,8 @@ class BufferedGzipFile(gzip.GzipFile):
     in Python 3.5.0.

     This works around a known issue in Python 3.5.
-    See https://bugs.python.org/issue25626"""
+    See https://bugs.python.org/issue25626
+    """

     # This helps avoid defining readinto in Python 2.6,
     #   where it is undefined on gzip.GzipFile.
@@ -114,7 +115,7 @@ def __init__(self, fileish, *args, **kwargs):
         n_args = len(args)
         full_kwargs.update(dict(zip(arg_names[:n_args], args)))
         # Set default mode
-        if not 'mode' in full_kwargs:
+        if 'mode' not in full_kwargs:
             kwargs['mode'] = 'rb'
         if 'compresslevel' in arg_names and 'compresslevel' not in kwargs:
             kwargs['compresslevel'] = self.default_compresslevel
diff --git a/nibabel/optpkg.py b/nibabel/optpkg.py
index 29a95c15f3..bbff65365b 100644
--- a/nibabel/optpkg.py
+++ b/nibabel/optpkg.py
@@ -51,7 +51,8 @@ def optional_package(name, trip_msg=None):
     >>> pkg.some_function() #doctest: +IGNORE_EXCEPTION_DETAIL
     Traceback (most recent call last):
         ...
-    TripWireError: We need package not_a_package for these functions, but ``import not_a_package`` raised an ImportError
+    TripWireError: We need package not_a_package for these functions,
+    but ``import not_a_package`` raised an ImportError

     If the module does exist - we get the module
diff --git a/nibabel/parrec.py b/nibabel/parrec.py
index c99cf1bbb4..5e85c411f5 100644
--- a/nibabel/parrec.py
+++ b/nibabel/parrec.py
@@ -15,7 +15,7 @@
 versions could probably be supported, but we need example images to test
 against.  If you want us to support another version, and have an image we can
 add to the test suite, let us know. You would make us very happy by submitting
-a pull request.
+a pull request.  # noqa

 ###############
 PAR file format
@@ -406,7 +406,7 @@ def _err_or_warn(msg):
             warnings.warn(msg)

     def _chk_trunc(idef_name, gdef_max_name):
-        if not gdef_max_name in general_info:
+        if gdef_max_name not in general_info:
             return
         id_values = image_defs[idef_name + ' number']
         n_have = len(set(id_values))
diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py
index 7c8c193297..adc2367238 100644
--- a/nibabel/quaternions.py
+++ b/nibabel/quaternions.py
@@ -318,7 +318,8 @@ def rotate_vector(v, q):
     Notes
     -----
-    See: https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Describing_rotations_with_quaternions
+    See:
+    https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Describing_rotations_with_quaternions

     '''
     varr = np.zeros((4,))
diff --git a/nibabel/spaces.py b/nibabel/spaces.py
index 3911b6d4be..393a8a216f 100644
--- a/nibabel/spaces.py
+++ b/nibabel/spaces.py
@@ -77,7 +77,7 @@ def vox2out_vox(mapped_voxels, voxel_sizes=None):
     if n_axes < 3:
         in_shape += (1,) * (3 - n_axes)
         out_vox = np.ones((3,))
-    if not voxel_sizes is None:
+    if voxel_sizes is not None:
         if not len(voxel_sizes) == n_axes:
             raise ValueError('voxel sizes length should match shape')
         if not np.all(np.array(voxel_sizes) > 0):
diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py
index 088b6aa0e9..23ab9c9c2d 100644
--- a/nibabel/spatialimages.py
+++ b/nibabel/spatialimages.py
@@ -138,7 +138,7 @@
 import numpy as np

 from .filebasedimages import FileBasedHeader, FileBasedImage
-from .filebasedimages import ImageFileError  # needed for back-compat.
+from .filebasedimages import ImageFileError  # flake8: noqa; for back-compat
 from .volumeutils import shape_zoom_affine
diff --git a/nibabel/trackvis.py b/nibabel/trackvis.py
index 89e3d3f1b0..58d4639cb8 100644
--- a/nibabel/trackvis.py
+++ b/nibabel/trackvis.py
@@ -214,7 +214,7 @@ def track_gen():
             # Short of bytes, should we raise an error or continue?
             actual_n_pts = int(len(pts_str) / pt_size)
             if actual_n_pts != n_pts:
-                if strict == True:
+                if strict:
                     raise DataError('Expecting {0} points for stream {1}, '
                                     'found {2}'.format(
                                         n_pts, n_streams, actual_n_pts))
@@ -371,13 +371,13 @@ def write(fileobj, streamlines, hdr_mapping=None, endianness=None,
     # Get number of scalars and properties
     pts, scalars, props = streams0
     # calculate number of scalars
-    if not scalars is None:
+    if scalars is not None:
         n_s = scalars.shape[1]
     else:
         n_s = 0
     hdr['n_scalars'] = n_s
     # calculate number of properties
-    if not props is None:
+    if props is not None:
         n_p = props.size
         hdr['n_properties'] = n_p
     else:
@@ -816,7 +816,7 @@ def __init__(self,
         self.endianness = endianness
         self.filename = filename
         self.points_space = points_space
-        if not affine is None:
+        if affine is not None:
             self.set_affine(affine, pos_vox=True, set_order=True)

     @classmethod
diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py
index 5fa08b78c5..dcdb083f50 100644
--- a/nibabel/volumeutils.py
+++ b/nibabel/volumeutils.py
@@ -357,7 +357,7 @@ def make_dt_codes(codes_seqs):
     '''
     fields = ['code', 'label', 'type']
     len0 = len(codes_seqs[0])
-    if not len0 in (3, 4):
+    if len0 not in (3, 4):
         raise ValueError('Sequences must be length 3 or 4')
     if len0 == 4:
         fields.append('niistring')
@@ -487,7 +487,7 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True):
     >>> np.all(arr == arr2)
     True
     '''
-    if not mmap in (True, False, 'c', 'r', 'r+'):
+    if mmap not in (True, False, 'c', 'r', 'r+'):
         raise ValueError("mmap value should be one of True, False, 'c', "
                          "'r', 'r+'")
     if mmap is True:
@@ -634,8 +634,8 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0,
         out_dtype = np.dtype(out_dtype)
     if offset is not None:
         seek_tell(fileobj, offset)
-    if (div_none or (mn, mx) == (0, 0) or ((mn is not None and mx is not None)
-                                           and mx < mn)):
+    if (div_none or (mn, mx) == (0, 0) or
+            ((mn is not None and mx is not None) and mx < mn)):
         write_zeros(fileobj, data.size * out_dtype.itemsize)
         return
     if order not in 'FC':
@@ -808,17 +808,17 @@ def _write_data(data,
     nan_need_copy = ((pre_clips, in_cast, inter, slope, post_clips) ==
                      (None, None, 0, 1, None))
     for dslice in data:  # cycle over first dimension to save memory
-        if not pre_clips is None:
+        if pre_clips is not None:
             dslice = np.clip(dslice, *pre_clips)
-        if not in_cast is None:
+        if in_cast is not None:
             dslice = dslice.astype(in_cast)
         if inter != 0.0:
             dslice = dslice - inter
         if slope != 1.0:
             dslice = dslice / slope
-        if not post_clips is None:
+        if post_clips is not None:
             dslice = np.clip(np.rint(dslice), *post_clips)
-        if not nan_fill is None:
+        if nan_fill is not None:
             nans = np.isnan(dslice)
             if np.any(nans):
                 if nan_need_copy:
@@ -1047,7 +1047,7 @@ def calculate_scale(data, out_dtype, allow_intercept):
     mn, mx = writer.finite_range()
     if (mn, mx) == (np.inf, -np.inf):  # No valid data
         return (None, None, None, None)
-    if not in_dtype.kind in 'fc':
+    if in_dtype.kind not in 'fc':
         mn, mx = (None, None)
     return get_slope_inter(writer) + (mn, mx)
@@ -1300,7 +1300,7 @@ def better_float_of(first, second, default=np.float32):
     second = np.dtype(second)
     default = np.dtype(default).type
     kinds = (first.kind, second.kind)
-    if not 'f' in kinds:
+    if 'f' not in kinds:
         return default
     if kinds == ('f', 'f'):
         if first.itemsize >= second.itemsize:
@@ -1316,7 +1316,7 @@ def _ftype4scaled_finite(tst_arr, slope, inter, direction='read',
     """ Smallest float type for scaling of `tst_arr` that does not overflow
     """
     assert direction in ('read', 'write')
-    if not default in OK_FLOATS and default is np.longdouble:
+    if default not in OK_FLOATS and default is np.longdouble:
         # Omitted longdouble
         return default
     def_ind = OK_FLOATS.index(default)
diff --git a/nibabel/wrapstruct.py b/nibabel/wrapstruct.py
index 52e8a5e4cc..8d940ef68d 100644
--- a/nibabel/wrapstruct.py
+++ b/nibabel/wrapstruct.py
@@ -522,7 +522,7 @@ def get_value_label(self, fieldname):
         >>> hdr.get_value_label('datatype')
         'two'
         '''
-        if not fieldname in self._field_recoders:
+        if fieldname not in self._field_recoders:
             raise ValueError('%s not a coded field' % fieldname)
         code = int(self._structarr[fieldname])
         try:
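[Aside, not part of the patch series: the one-line change in the next patch
fixes a real bug rather than style. `nibabel/mriutils.py` listed 'dwell_time'
in `__all__`, but the module only defines `calculate_dwell_time`, so a star
import of the module would fail. A self-contained sketch of that failure
mode, using a hypothetical throwaway module `mod`:

    import sys
    import types

    # Build a module whose __all__ names an attribute that does not exist.
    mod = types.ModuleType('mod')
    exec("__all__ = ['dwell_time']\n"
         "def calculate_dwell_time():\n"
         "    return 0.0\n", mod.__dict__)
    sys.modules['mod'] = mod

    try:
        exec('from mod import *')
    except AttributeError as err:
        print(err)  # complains that 'mod' has no attribute 'dwell_time'
]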
From 54aadea837e8c099c9a1d939d28dae6c81cc99fe Mon Sep 17 00:00:00 2001
From: Ben Cipollini
Date: Fri, 5 Feb 2016 09:54:59 -0800
Subject: [PATCH 07/11] BF: fix bug in mriutils use of __all__

---
 nibabel/mriutils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nibabel/mriutils.py b/nibabel/mriutils.py
index 9f58a629c5..0f27544fae 100644
--- a/nibabel/mriutils.py
+++ b/nibabel/mriutils.py
@@ -11,7 +11,7 @@
 """
 from __future__ import division

-__all__ = ['dwell_time']
+__all__ = ['calculate_dwell_time']

 GYROMAGNETIC_RATIO = 42.576  # MHz/T for hydrogen nucleus
 PROTON_WATER_FAT_SHIFT = 3.4  # ppm

From abf827afb7b2a654eb7b81143a7a34ceeedc4f3e Mon Sep 17 00:00:00 2001
From: Ben Cipollini
Date: Fri, 5 Feb 2016 11:29:02 -0800
Subject: [PATCH 08/11] STY: manual fixes to autopep8

---
 nibabel/volumeutils.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py
index dcdb083f50..d02d0d63a5 100644
--- a/nibabel/volumeutils.py
+++ b/nibabel/volumeutils.py
@@ -1543,9 +1543,9 @@ class BinOpener(Opener):
     __doc__ = Opener.__doc__

     def __init__(self, *args, **kwargs):
-        warnings.warn("Please use %s class instead of %s" % (
-            Opener.__class__.__name__,
-            self.__class__.__name__),
+        warnings.warn(
+            "Please use %s class instead of %s" % (Opener.__class__.__name__,
+                                                   self.__class__.__name__),
             DeprecationWarning,
             stacklevel=2)
         return super(BinOpener, self).__init__(*args, **kwargs)

From 6c8e136eb9d39134ac6bf541669312c3c90b5404 Mon Sep 17 00:00:00 2001
From: Ben Cipollini
Date: Fri, 5 Feb 2016 18:07:42 -0800
Subject: [PATCH 09/11] TST: add flake8 of core nibabel code to TravisCI

---
 .travis.yml | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 94efe063c6..fcbacad978 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -62,8 +62,8 @@ before_install:
     - source tools/travis_tools.sh
     - virtualenv --python=python venv
    - source venv/bin/activate
    - python --version # just to check
-    - pip install -U pip # upgrade to latest pip to find 3.5 wheels
-    - retry pip install nose # always
+    - pip install -U pip wheel # upgrade to latest pip to find 3.5 wheels; wheel to avoid errors
+    - retry pip install nose flake8 # always
    - wheelhouse_pip_install $DEPENDS
    # pydicom <= 0.9.8 doesn't install on python 3
    - if [ "${TRAVIS_PYTHON_VERSION:0:1}" == "2" ]; then
@@ -98,6 +98,8 @@ install:
    - export NIBABEL_DATA_DIR="$PWD/nibabel-data"
 # command to run tests, e.g. python setup.py test
 script:
+    # Run styles first, only on core nibabel code.
+    - flake8 nibabel
     # Change into an innocuous directory and find tests from installation
     - mkdir for_testing
     - cd for_testing
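[Aside, not part of the patch series: the test edits in the next patch drop
assignments whose results are never read, like `fail_i = np.nonzero(all_fails)`
earlier in this series. In function scope, pyflakes reports that pattern as
F841 (local variable assigned but never used); note, though, that the tox.ini
config above excludes `*test*` from flake8, so these edits are tidying rather
than something the new CI gate demands. The surviving bare calls still act as
"does not raise" smoke tests, roughly:

    import numpy as np

    def check_arr(all_fails):
        # fail_i = np.nonzero(all_fails)   # old form: F841, value never used
        np.nonzero(all_fails)              # kept form: the call must not raise

    check_arr(np.array([False, True, False]))
]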
From ae819f1e8764b68a50bae93a85f7ea20e52aa2c0 Mon Sep 17 00:00:00 2001
From: Ben Cipollini
Date: Fri, 5 Feb 2016 23:02:45 -0800
Subject: [PATCH 10/11] TST: remove unnecessary checks, document necessary checks

---
 nibabel/gifti/tests/test_gifti.py     | 1 -
 nibabel/nicom/tests/test_csareader.py | 2 +-
 nibabel/tests/test_data.py            | 2 +-
 nibabel/tests/test_ecat.py            | 1 -
 4 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py
index 3ae6cb44aa..b572db0c85 100644
--- a/nibabel/gifti/tests/test_gifti.py
+++ b/nibabel/gifti/tests/test_gifti.py
@@ -179,7 +179,6 @@ def assign_metadata(val):

 def test_data_tag_deprecated():
-    GiftiImage()
     with clear_and_catch_warnings() as w:
         warnings.filterwarnings('once', category=DeprecationWarning)
         data_tag(np.array([]), 'ASCII', '%i', 1)
diff --git a/nibabel/nicom/tests/test_csareader.py b/nibabel/nicom/tests/test_csareader.py
index 509d786914..ffef46ae44 100644
--- a/nibabel/nicom/tests/test_csareader.py
+++ b/nibabel/nicom/tests/test_csareader.py
@@ -108,7 +108,7 @@ def test_csa_params():
     assert_equal(b_matrix.shape, (3, 3))
     # check (by absence of error) that the B matrix is positive
     # semi-definite.
-    dwp.B2q(b_matrix)
+    dwp.B2q(b_matrix)  # no error
     b_value = csa.get_b_value(csa_info)
     assert_equal(b_value, 1000)
     g_vector = csa.get_g_vector(csa_info)
diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py
index 1091ca18c3..fbb225838d 100644
--- a/nibabel/tests/test_data.py
+++ b/nibabel/tests/test_data.py
@@ -238,7 +238,7 @@ def test_make_datasource():
 @raises(DataError)
 def test_bomber():
     b = Bomber('bomber example', 'a message')
-    b.any_attribute
+    b.any_attribute  # no error

 def test_bomber_inspect():
diff --git a/nibabel/tests/test_ecat.py b/nibabel/tests/test_ecat.py
index 00c54e5d5e..4e304a6ae7 100644
--- a/nibabel/tests/test_ecat.py
+++ b/nibabel/tests/test_ecat.py
@@ -172,7 +172,6 @@ def test_subheader(self):
         assert_equal(self.subhdr._get_frame_offset(), 1536)
         dat = self.subhdr.raw_data_from_fileobj()
         assert_equal(dat.shape, self.subhdr.get_shape())
-        self.subhdr.subheaders[0]['scale_factor']
         assert_equal(self.subhdr.subheaders[0]['scale_factor'].item(), 1.0)
         ecat_calib_factor = self.hdr['ecat_calibration_factor']
         assert_equal(ecat_calib_factor, 25007614.0)
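[Aside, not part of the patch series: the final patch moves the flake8 run out
of every test job and into two dedicated build matrix entries keyed on
STYLE=1, so a style failure shows up as its own build line instead of aborting
a test run. The outer shell branch it installs roughly mirrors this
illustrative Python (simplified; the real test branch also handles COVERAGE
and DOC_DOC_TEST):

    import os

    if os.environ.get('STYLE') == '1':
        cmd = ['flake8', 'nibabel']                       # style-only job
    else:
        cmd = ['nosetests', '--with-doctest', 'nibabel']  # ordinary test job
    print(' '.join(cmd))
]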
From 54ef609c1e9cce11fd4ac582c3517c9e0b42f2db Mon Sep 17 00:00:00 2001
From: "Christopher J. Markiewicz"
Date: Sat, 6 Feb 2016 11:23:34 -0500
Subject: [PATCH 11/11] TST: Run style tests separately

---
 .travis.yml | 40 +++++++++++++++++++++++++---------------
 1 file changed, 25 insertions(+), 15 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index fcbacad978..2f010533e2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -57,6 +57,12 @@ matrix:
     - python: 2.7
       env:
         - INSTALL_TYPE=requirements
+    - python: 2.7
+      env:
+        - STYLE=1
+    - python: 3.5
+      env:
+        - STYLE=1
 before_install:
     - source tools/travis_tools.sh
     - virtualenv --python=python venv
     - source venv/bin/activate
     - python --version # just to check
     - pip install -U pip wheel # upgrade to latest pip to find 3.5 wheels; wheel to avoid errors
     - retry pip install nose flake8 # always
     - wheelhouse_pip_install $DEPENDS
     # pydicom <= 0.9.8 doesn't install on python 3
     - if [ "${TRAVIS_PYTHON_VERSION:0:1}" == "2" ]; then
@@ -98,6 +104,26 @@ install:
     - export NIBABEL_DATA_DIR="$PWD/nibabel-data"
 # command to run tests, e.g. python setup.py test
 script:
-    # Run styles first, only on core nibabel code.
-    - flake8 nibabel
-    # Change into an innocuous directory and find tests from installation
-    - mkdir for_testing
-    - cd for_testing
-    - if [ "${COVERAGE}" == "1" ]; then
-      cp ../.coveragerc .;
-      COVER_ARGS="--with-coverage --cover-package nibabel";
-      fi
-    - if [ "$DOC_DOC_TEST" == "1" ]; then
-      pip install sphinx numpydoc texext;
-      cd ../doc;
-      make html;
-      make doctest;
-      else
-      nosetests --with-doctest $COVER_ARGS nibabel;
-      fi
+    - |
+      if [ "${STYLE}" == "1" ]; then
+          # Run styles only on core nibabel code.
+          flake8 nibabel
+      else
+          # Change into an innocuous directory and find tests from installation
+          mkdir for_testing
+          cd for_testing
+          if [ "${COVERAGE}" == "1" ]; then
+              cp ../.coveragerc .;
+              COVER_ARGS="--with-coverage --cover-package nibabel";
+          fi
+          if [ "$DOC_DOC_TEST" == "1" ]; then
+              pip install sphinx numpydoc texext;
+              cd ../doc;
+              make html;
+              make doctest;
+          else
+              nosetests --with-doctest $COVER_ARGS nibabel;
+          fi
+      fi
 after_success:
     - if [ "${COVERAGE}" == "1" ]; then coveralls; fi
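[Aside, not part of the patch series: after this series, `flake8 nibabel` is
the whole style gate, configured by the `[flake8]` section that PATCH 05 added
to tox.ini. A rough local equivalent of the STYLE=1 Travis job, assuming
flake8 is installed:

    import subprocess
    import sys

    sys.exit(subprocess.call(['flake8', 'nibabel']))  # 0 means style-clean
]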