diff --git a/ibllib/oneibl/data_handlers.py b/ibllib/oneibl/data_handlers.py
index 6ff2f4954..8613599d4 100644
--- a/ibllib/oneibl/data_handlers.py
+++ b/ibllib/oneibl/data_handlers.py
@@ -691,7 +691,7 @@ def __init__(self, session_path, signatures, one=None):
         :param signatures: input and output file signatures
         :param one: ONE instance
         """
-        from one.remote.globus import Globus, get_lab_from_endpoint_id  # noqa
+        from one.remote.globus import Globus  # noqa
         super().__init__(session_path, signatures, one=one)
         self.globus = Globus(client_name='server', headless=True)
 
@@ -807,13 +807,13 @@ def __init__(self, session_path, signature, one=None):
         """
         super().__init__(session_path, signature, one=one)
 
-    def setUp(self, **_):
+    def setUp(self, check_hash=True, **_):
         """
         Function to download necessary data to run tasks using ONE
         :return:
         """
         df = super().getData()
-        self.one._check_filesystem(df)
+        self.one._check_filesystem(df, check_hash=check_hash)
 
     def uploadData(self, outputs, version, **kwargs):
         """
diff --git a/ibllib/oneibl/registration.py b/ibllib/oneibl/registration.py
index c99b8ae54..9688dbeda 100644
--- a/ibllib/oneibl/registration.py
+++ b/ibllib/oneibl/registration.py
@@ -492,6 +492,8 @@ def _get_session_performance(md, ses_data):
     n_trials = []
     n_correct = []
     for data, settings in filter(all, zip(ses_data, md)):
+        if 'trial_num' not in data[-1]:
+            continue  # Skip if no trial_num key in task data
         # In some protocols trials start from 0, in others, from 1
         n = data[-1]['trial_num'] + int(data[0]['trial_num'] == 0)  # +1 if starts from 0
         n_trials.append(n)
@@ -501,7 +503,7 @@ def _get_session_performance(md, ses_data):
         if 'habituationChoiceWorld' in settings.get('PYBPOD_PROTOCOL', ''):
             n_correct.append(0)
         else:
-            n_correct.append(data[-1].get('ntrials_correct', sum(x['trial_correct'] for x in data)))
+            n_correct.append(data[-1].get('ntrials_correct', sum(x.get('trial_correct', 0) for x in data)))
 
     return sum(n_trials), sum(n_correct)
 
diff --git a/ibllib/pipes/dynamic_pipeline.py b/ibllib/pipes/dynamic_pipeline.py
index a7f64e8e4..438aca403 100644
--- a/ibllib/pipes/dynamic_pipeline.py
+++ b/ibllib/pipes/dynamic_pipeline.py
@@ -199,7 +199,7 @@ def _get_trials_tasks(session_path, acquisition_description=None, sync_tasks=Non
     """
     Generate behaviour tasks from acquisition description.
 
-    This returns all behaviour related tasks including TrialsRegisterRaw and TrainingStatus objects.
+    This returns all behaviour related tasks including TrialRegisterRaw and TrainingStatus objects.
 
     Parameters
     ----------
diff --git a/ibllib/pipes/mesoscope_tasks.py b/ibllib/pipes/mesoscope_tasks.py
index ba97ab4e8..0430a14e0 100644
--- a/ibllib/pipes/mesoscope_tasks.py
+++ b/ibllib/pipes/mesoscope_tasks.py
@@ -111,14 +111,14 @@ def tearDown(self):
         _logger.setLevel(self._log_level or logging.INFO)
         return super().tearDown()
 
-    def _run(self, remove_uncompressed=False, verify_output=True, overwrite=False, **kwargs):
+    def _run(self, remove_uncompressed=True, verify_output=True, overwrite=False, **kwargs):
         """
         Run tar compression on all tif files in the device collection.
 
         Parameters
         ----------
         remove_uncompressed: bool
-            Whether to remove the original, uncompressed data. Default is False.
+            Whether to remove the original, uncompressed data. Default is True.
         verify_output: bool
             Whether to check that the compressed tar file can be uncompressed without errors. Default is True.
@@ -224,10 +224,11 @@ def setUp(self, **kwargs):
         """
         self.overwrite = kwargs.get('overwrite', False)
         all_files_present = super().setUp(**kwargs)  # Ensure files present
-        bin_sig, = dataset_from_name('data.bin', self.input_files)
-        renamed_bin_sig, = dataset_from_name('imaging.frames_motionRegistered.bin', self.input_files)
-        if not self.overwrite and (bin_sig | renamed_bin_sig).find_files(self.session_path)[0]:
-            return all_files_present  # We have local bin files; no need to extract tifs
+        if not self.overwrite:
+            bin_sig = dataset_from_name('data.bin', self.input_files)[0]
+            renamed_bin_sig = dataset_from_name('imaging.frames_motionRegistered.bin', self.input_files)[0]
+            if (bin_sig | renamed_bin_sig).find_files(self.session_path)[0]:
+                return all_files_present  # We have local bin files; no need to extract tifs
         tif_sig = dataset_from_name('*.tif', self.input_files)
         if not tif_sig:
             return all_files_present  # No tifs in the signature; just return
@@ -269,10 +270,10 @@ def signature(self):
         # The number of in and outputs will be dependent on the number of input raw imaging folders and output FOVs
         I = ExpectedDataset.input  # noqa
         signature = {
-            'input_files': [('_ibl_rawImagingData.meta.json', self.device_collection, True),
+            'input_files': [I('_ibl_rawImagingData.meta.json', self.device_collection, True, unique=False),
                             I('*.tif', self.device_collection, True) |
                             I('imaging.frames.tar.bz2', self.device_collection, True, unique=False),
-                            ('exptQC.mat', self.device_collection, False)],
+                            I('exptQC.mat', self.device_collection, False)],
             'output_files': [('mpci.ROIActivityF.npy', 'alf/FOV*', True),
                              ('mpci.ROINeuropilActivityF.npy', 'alf/FOV*', True),
                              ('mpci.ROIActivityDeconvolved.npy', 'alf/FOV*', True),
@@ -1050,6 +1051,8 @@ def roi_mlapdv(self, nFOV: int, suffix=None):
         MLAPDV coordinates are in um relative to bregma. Location IDs are from the 2017 Allen common coordinate framework atlas.
+
+        FIXME stackPos Y, X (not X, Y) - may affect xy loc
 
         Parameters
         ----------
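For context on the registration.py change above, here is a minimal, self-contained sketch of the guarded performance computation. The helper name session_performance and the sample data are illustrative only (not part of the patch), and the PYBPOD_PROTOCOL/habituation handling of the real _get_session_performance is omitted.

# Illustrative sketch only: mirrors the trial_num guard and the tolerant
# trial_correct sum from the patched _get_session_performance.
def session_performance(ses_data):
    n_trials, n_correct = [], []
    for data in filter(None, ses_data):
        if 'trial_num' not in data[-1]:
            continue  # task data without trial numbers contributes nothing
        # +1 if trial numbering starts from 0
        n = data[-1]['trial_num'] + int(data[0]['trial_num'] == 0)
        n_trials.append(n)
        n_correct.append(data[-1].get('ntrials_correct', sum(x.get('trial_correct', 0) for x in data)))
    return sum(n_trials), sum(n_correct)

# Example: one task with trial numbers, one (e.g. a passive protocol) without.
trials = [{'trial_num': 0, 'trial_correct': 1}, {'trial_num': 1, 'trial_correct': 0}]
passive = [{'stim_on': 1.2}]
assert session_performance([trials, passive]) == (2, 1)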