6 changes: 3 additions & 3 deletions ibllib/oneibl/data_handlers.py
@@ -691,7 +691,7 @@ def __init__(self, session_path, signatures, one=None):
:param signatures: input and output file signatures
:param one: ONE instance
"""
-from one.remote.globus import Globus, get_lab_from_endpoint_id # noqa
+from one.remote.globus import Globus # noqa
super().__init__(session_path, signatures, one=one)
self.globus = Globus(client_name='server', headless=True)

@@ -807,13 +807,13 @@ def __init__(self, session_path, signature, one=None):
"""
super().__init__(session_path, signature, one=one)

-def setUp(self, **_):
+def setUp(self, check_hash=True, **_):
"""
Function to download necessary data to run tasks using ONE
:return:
"""
df = super().getData()
-self.one._check_filesystem(df)
+self.one._check_filesystem(df, check_hash=check_hash)

def uploadData(self, outputs, version, **kwargs):
"""
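A minimal sketch of how the new `check_hash` flag might be used when re-running a task's data staging locally. The handler class name, session path and signature are assumptions for illustration; only `setUp(check_hash=...)` and its forwarding to `one._check_filesystem` come from this change.

```python
# Hedged sketch: the handler class name and paths are assumptions; only the
# setUp(check_hash=...) keyword comes from this diff.
from pathlib import Path
from one.api import ONE
from ibllib.oneibl.data_handlers import RemoteHttpDataHandler  # assumed handler class

one = ONE()
session_path = Path('/mnt/data/Subjects/SWC_000/2024-01-01/001')  # illustrative
signature = {'input_files': [], 'output_files': []}  # illustrative empty signature

handler = RemoteHttpDataHandler(session_path, signature, one=one)
handler.setUp()                  # default: verify file hashes against the database
handler.setUp(check_hash=False)  # trust existing local copies; skip hash verification
```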
4 changes: 3 additions & 1 deletion ibllib/oneibl/registration.py
@@ -492,6 +492,8 @@ def _get_session_performance(md, ses_data):
n_trials = []
n_correct = []
for data, settings in filter(all, zip(ses_data, md)):
+if 'trial_num' not in data[-1]:
+    continue  # Skip if no trial_num key in task data
# In some protocols trials start from 0, in others, from 1
n = data[-1]['trial_num'] + int(data[0]['trial_num'] == 0) # +1 if starts from 0
n_trials.append(n)
@@ -501,7 +503,7 @@
if 'habituationChoiceWorld' in settings.get('PYBPOD_PROTOCOL', ''):
n_correct.append(0)
else:
-n_correct.append(data[-1].get('ntrials_correct', sum(x['trial_correct'] for x in data)))
+n_correct.append(data[-1].get('ntrials_correct', sum(x.get('trial_correct', 0) for x in data)))

return sum(n_trials), sum(n_correct)

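The guards added above make the per-session aggregation tolerant of task data that lack `trial_num` or `trial_correct` keys. A simplified, self-contained sketch of that aggregation (the settings filtering and the habituationChoiceWorld branch are omitted; the trial dictionaries are made up):

```python
# Illustrative trial data: the second session has no 'trial_num' key and one
# trial is missing 'trial_correct'; the .get() fallbacks keep the sums working.
ses_data = [
    [{'trial_num': 0, 'trial_correct': True},
     {'trial_num': 1},                        # no 'trial_correct' -> counts as 0
     {'trial_num': 2, 'trial_correct': True}],
    [{'reward_time': 1.2}],                   # no 'trial_num' -> session skipped
]

n_trials, n_correct = 0, 0
for data in ses_data:
    if 'trial_num' not in data[-1]:
        continue  # skip task data without trial numbers
    # In some protocols trials start from 0, in others from 1
    n_trials += data[-1]['trial_num'] + int(data[0]['trial_num'] == 0)
    n_correct += data[-1].get('ntrials_correct', sum(x.get('trial_correct', 0) for x in data))

print(n_trials, n_correct)  # -> 3 2
```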
2 changes: 1 addition & 1 deletion ibllib/pipes/dynamic_pipeline.py
@@ -199,7 +199,7 @@ def _get_trials_tasks(session_path, acquisition_description=None, sync_tasks=Non
"""
Generate behaviour tasks from acquisition description.

-This returns all behaviour related tasks including TrialsRegisterRaw and TrainingStatus objects.
+This returns all behaviour related tasks including TrialRegisterRaw and TrainingStatus objects.

Parameters
----------
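A hedged usage sketch of `_get_trials_tasks`, whose docstring is corrected above; the session path is illustrative and the return type is assumed to be a mapping of task names to behaviour task instances.

```python
# Hedged sketch: the session path is illustrative; the call follows the
# signature shown in the hunk header above.
from pathlib import Path
from ibllib.pipes.dynamic_pipeline import _get_trials_tasks

session_path = Path('/mnt/data/Subjects/SWC_000/2024-01-01/001')  # illustrative
tasks = _get_trials_tasks(session_path)  # acquisition description read from the session when None
for name in tasks:  # assumed dict-like: task name -> behaviour task instance
    print(name)
```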
19 changes: 11 additions & 8 deletions ibllib/pipes/mesoscope_tasks.py
@@ -111,14 +111,14 @@ def tearDown(self):
_logger.setLevel(self._log_level or logging.INFO)
return super().tearDown()

-def _run(self, remove_uncompressed=False, verify_output=True, overwrite=False, **kwargs):
+def _run(self, remove_uncompressed=True, verify_output=True, overwrite=False, **kwargs):
"""
Run tar compression on all tif files in the device collection.

Parameters
----------
remove_uncompressed: bool
-Whether to remove the original, uncompressed data. Default is False.
+Whether to remove the original, uncompressed data. Default is True.
verify_output: bool
Whether to check that the compressed tar file can be uncompressed without errors.
Default is True.
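With the default flipped to `remove_uncompressed=True`, keeping the raw tifs now requires passing the flag explicitly. A hedged sketch, assuming this hunk belongs to the `MesoscopeCompress` task; the session path and constructor kwargs are illustrative.

```python
# Hedged sketch: MesoscopeCompress is assumed to be the task owning this _run();
# only the remove_uncompressed/verify_output parameters come from this hunk.
from pathlib import Path
from ibllib.pipes.mesoscope_tasks import MesoscopeCompress

session_path = Path('/mnt/data/Subjects/SWC_000/2024-01-01/001')  # illustrative
task = MesoscopeCompress(session_path, device_collection='raw_imaging_data_00')  # assumed kwargs
task.run(remove_uncompressed=False)  # keep the original tifs alongside the tar.bz2 archive
```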
@@ -224,10 +224,11 @@ def setUp(self, **kwargs):
"""
self.overwrite = kwargs.get('overwrite', False)
all_files_present = super().setUp(**kwargs) # Ensure files present
-bin_sig, = dataset_from_name('data.bin', self.input_files)
-renamed_bin_sig, = dataset_from_name('imaging.frames_motionRegistered.bin', self.input_files)
-if not self.overwrite and (bin_sig | renamed_bin_sig).find_files(self.session_path)[0]:
-    return all_files_present  # We have local bin files; no need to extract tifs
+if not self.overwrite:
+    bin_sig = dataset_from_name('data.bin', self.input_files)[0]
+    renamed_bin_sig = dataset_from_name('imaging.frames_motionRegistered.bin', self.input_files)[0]
+    if (bin_sig | renamed_bin_sig).find_files(self.session_path)[0]:
+        return all_files_present  # We have local bin files; no need to extract tifs
tif_sig = dataset_from_name('*.tif', self.input_files)
if not tif_sig:
return all_files_present # No tifs in the signature; just return
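The restructured setUp above checks whether either the raw or the renamed motion-registered binary is already present before falling back to tif extraction. A sketch of that presence check in isolation, assuming the import location shown; the dataset names and the `find_files()[0]` presence flag are taken from the usage in this diff, and `|` combines two `ExpectedDataset` objects so that either one satisfies the check.

```python
# Hedged sketch of the presence check used above; import location is an assumption.
from ibllib.oneibl.data_handlers import dataset_from_name  # assumed import location


def local_bin_available(task):
    """Return True if either binary dataset is already present for the session."""
    bin_sig = dataset_from_name('data.bin', task.input_files)[0]
    renamed = dataset_from_name('imaging.frames_motionRegistered.bin', task.input_files)[0]
    # Either dataset satisfies the requirement; [0] is the presence flag.
    return (bin_sig | renamed).find_files(task.session_path)[0]
```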
@@ -269,10 +270,10 @@ def signature(self):
# The number of in and outputs will be dependent on the number of input raw imaging folders and output FOVs
I = ExpectedDataset.input # noqa
signature = {
-'input_files': [('_ibl_rawImagingData.meta.json', self.device_collection, True),
+'input_files': [I('_ibl_rawImagingData.meta.json', self.device_collection, True, unique=False),
I('*.tif', self.device_collection, True) |
I('imaging.frames.tar.bz2', self.device_collection, True, unique=False),
-('exptQC.mat', self.device_collection, False)],
+I('exptQC.mat', self.device_collection, False)],
'output_files': [('mpci.ROIActivityF.npy', 'alf/FOV*', True),
('mpci.ROINeuropilActivityF.npy', 'alf/FOV*', True),
('mpci.ROIActivityDeconvolved.npy', 'alf/FOV*', True),
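The signature now declares every input as an `ExpectedDataset`, with `unique=False` where a dataset may legitimately match more than one raw imaging folder and `|` where either the tifs or the compressed archive satisfies the requirement. A stripped-down sketch of the same construction; the import path, the collection pattern and the reading of `unique=False` are assumptions based on this diff and its surrounding comments.

```python
# Hedged sketch of the input-signature pattern above.
from ibllib.oneibl.data_handlers import ExpectedDataset  # assumed import location

I = ExpectedDataset.input
device_collection = 'raw_imaging_data_*'  # illustrative collection pattern

input_files = [
    # unique=False: the dataset may match several raw imaging folders (assumption)
    I('_ibl_rawImagingData.meta.json', device_collection, True, unique=False),
    # either the raw tifs or the compressed archive satisfies this requirement
    I('*.tif', device_collection, True) |
    I('imaging.frames.tar.bz2', device_collection, True, unique=False),
    I('exptQC.mat', device_collection, False),
]
```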
@@ -1050,6 +1051,8 @@ def roi_mlapdv(self, nFOV: int, suffix=None):

MLAPDV coordinates are in um relative to bregma. Location IDs are from the 2017 Allen
common coordinate framework atlas.

+FIXME stackPos Y, X (not X, Y) - may affect xy loc

Parameters
----------