Merge branch 'develop' into wheel_alignment
mayofaulkner committed Oct 11, 2023
2 parents 6e59101 + 2803838 · commit 65aa9a4
Showing 19 changed files with 1,119 additions and 402 deletions.
659 changes: 514 additions & 145 deletions brainbox/behavior/training.py

Large diffs are not rendered by default.

5 changes: 2 additions & 3 deletions ibllib/__init__.py
@@ -2,13 +2,12 @@
import logging
import warnings

__version__ = '2.26'
__version__ = '2.27'
warnings.filterwarnings('always', category=DeprecationWarning, module='ibllib')

# if this becomes a full-blown library we should let the logging configuration to the discretion of the dev
# who uses the library. However since it can also be provided as an app, the end-users should be provided
# with an useful default logging in standard output without messing with the complex python logging system
# -*- coding:utf-8 -*-
# with a useful default logging in standard output without messing with the complex python logging system
USE_LOGGING = True
#%(asctime)s,%(msecs)d
if USE_LOGGING:
20 changes: 15 additions & 5 deletions ibllib/io/extractors/mesoscope.py
@@ -23,24 +23,34 @@

def patch_imaging_meta(meta: dict) -> dict:
"""
Patch imaging meta data for compatibility across versions.
Patch imaging metadata for compatibility across versions.
A copy of the dict is NOT returned.
Parameters
----------
dict : dict
meta : dict
A folder path that contains a rawImagingData.meta file.
Returns
-------
dict
The loaded meta data file, updated to the most recent version.
The loaded metadata file, updated to the most recent version.
"""
# 2023-05-17 (unversioned) adds nFrames and channelSaved keys
if parse_version(meta.get('version') or '0.0.0') <= parse_version('0.0.0'):
# 2023-05-17 (unversioned) adds nFrames, channelSaved keys, MM and Deg keys
version = parse_version(meta.get('version') or '0.0.0')
if version <= parse_version('0.0.0'):
if 'channelSaved' not in meta:
meta['channelSaved'] = next((x['channelIdx'] for x in meta['FOV'] if 'channelIdx' in x), [])
fields = ('topLeft', 'topRight', 'bottomLeft', 'bottomRight')
for fov in meta.get('FOV', []):
for unit in ('Deg', 'MM'):
if unit not in fov: # topLeftDeg, etc. -> Deg[topLeft]
fov[unit] = {f: fov.pop(f + unit, None) for f in fields}
elif version == parse_version('0.1.0'):
for fov in meta.get('FOV', []):
if 'roiUuid' in fov:
fov['roiUUID'] = fov.pop('roiUuid')
return meta
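
For context (not part of the commit), a minimal usage sketch of patch_imaging_meta: load the raw imaging metadata into a dict, then patch it in place before any downstream extraction. The file name and loading step are assumptions; only patch_imaging_meta itself comes from the diff.

import json
from ibllib.io.extractors.mesoscope import patch_imaging_meta

# Assumed metadata file name and location; the real session layout may differ
with open('raw_imaging_data_00/rawImagingData.meta.json') as fp:
    meta = json.load(fp)

meta = patch_imaging_meta(meta)  # patches the dict in place and returns the same object (no copy)
for fov in meta.get('FOV', []):
    top_left_deg = fov['Deg']['topLeft']  # 'Deg'/'MM' groups exist after patching (natively in newer files)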


91 changes: 30 additions & 61 deletions ibllib/io/extractors/video_motion.py
@@ -40,11 +40,7 @@ def find_nearest(array, value):


class MotionAlignment:
roi = {
'left': ((800, 1020), (233, 1096)),
'right': ((426, 510), (104, 545)),
'body': ((402, 481), (31, 103))
}
roi = {'left': ((800, 1020), (233, 1096)), 'right': ((426, 510), (104, 545)), 'body': ((402, 481), (31, 103))}

def __init__(self, eid=None, one=None, log=logging.getLogger(__name__), **kwargs):
self.one = one or ONE()
@@ -94,12 +90,9 @@ def line_select_callback(eclick, erelease):
return np.array([[x1, x2], [y1, y2]])

plt.imshow(frame)
roi = RectangleSelector(plt.gca(), line_select_callback,
drawtype='box', useblit=True,
button=[1, 3], # don't use middle button
minspanx=5, minspany=5,
spancoords='pixels',
interactive=True)
roi = RectangleSelector(plt.gca(), line_select_callback, drawtype='box', useblit=True, button=[1, 3],
# don't use middle button
minspanx=5, minspany=5, spancoords='pixels', interactive=True)
plt.show()
((x1, x2, *_), (y1, *_, y2)) = roi.corners
col = np.arange(round(x1), round(x2), dtype=int)
@@ -115,14 +108,13 @@ def load_data(self, download=False):
self.data.wheel = self.one.load_object(self.eid, 'wheel')
self.data.trials = self.one.load_object(self.eid, 'trials')
cam = self.one.load(self.eid, ['camera.times'], dclass_output=True)
self.data.camera_times = {vidio.label_from_path(url): ts
for ts, url in zip(cam.data, cam.url)}
self.data.camera_times = {vidio.label_from_path(url): ts for ts, url in zip(cam.data, cam.url)}
else:
alf_path = self.session_path / 'alf'
self.data.wheel = alfio.load_object(alf_path, 'wheel', short_keys=True)
self.data.trials = alfio.load_object(alf_path, 'trials')
self.data.camera_times = {vidio.label_from_path(x): alfio.load_file_content(x)
for x in alf_path.glob('*Camera.times*')}
self.data.camera_times = {vidio.label_from_path(x): alfio.load_file_content(x) for x in
alf_path.glob('*Camera.times*')}
assert all(x is not None for x in self.data.values())

def _set_eid_or_path(self, session_path_or_eid):
@@ -191,8 +183,7 @@ def align_motion(self, period=(-np.inf, np.inf), side='left', sd_thresh=10, disp
roi = (*[slice(*r) for r in self.roi[side]], 0)
try:
# TODO Add function arg to make grayscale
self.alignment.frames = \
vidio.get_video_frames_preload(camera_path, frame_numbers, mask=roi)
self.alignment.frames = vidio.get_video_frames_preload(camera_path, frame_numbers, mask=roi)
assert self.alignment.frames.size != 0
except AssertionError:
self.log.error('Failed to open video')
@@ -239,8 +230,8 @@ def align_motion(self, period=(-np.inf, np.inf), side='left', sd_thresh=10, disp
y = np.pad(self.alignment.df, 1, 'edge')
ax[0].plot(x, y, '-x', label='wheel motion energy')
thresh = stDev > sd_thresh
ax[0].vlines(x[np.array(np.pad(thresh, 1, 'constant', constant_values=False))], 0, 1,
linewidth=0.5, linestyle=':', label=f'>{sd_thresh} s.d. diff')
ax[0].vlines(x[np.array(np.pad(thresh, 1, 'constant', constant_values=False))], 0, 1, linewidth=0.5, linestyle=':',
label=f'>{sd_thresh} s.d. diff')
ax[1].plot(t[interp_mask], np.abs(v[interp_mask]))

# Plot other stuff
@@ -307,9 +298,7 @@ def init_plot():
data['frame_num'] = 0
mkr = find_nearest(wheel.timestamps[wheel_mask], ts_0)

data['marker'], = ax.plot(
wheel.timestamps[wheel_mask][mkr],
wheel.position[wheel_mask][mkr], 'r-x')
data['marker'], = ax.plot(wheel.timestamps[wheel_mask][mkr], wheel.position[wheel_mask][mkr], 'r-x')
ax.set_ylabel('Wheel position (rad))')
ax.set_xlabel('Time (s))')
return
@@ -338,19 +327,13 @@ def animate(i):
data['im'].set_data(frame)

mkr = find_nearest(wheel.timestamps[wheel_mask], t_x)
data['marker'].set_data(
wheel.timestamps[wheel_mask][mkr],
wheel.position[wheel_mask][mkr]
)
data['marker'].set_data(wheel.timestamps[wheel_mask][mkr], wheel.position[wheel_mask][mkr])

return data['im'], data['ln'], data['marker']

anim = animation.FuncAnimation(fig, animate, init_func=init_plot,
frames=(range(len(self.alignment.df))
if save
else cycle(range(60))),
interval=20, blit=False,
repeat=not save, cache_frame_data=False)
frames=(range(len(self.alignment.df)) if save else cycle(range(60))), interval=20,
blit=False, repeat=not save, cache_frame_data=False)
anim.running = False

def process_key(event):
@@ -445,19 +428,15 @@ def fix_keys(alf_object):
return ob

alf_path = self.session_path.joinpath('alf')
# Load in wheel data
wheel = (fix_keys(alfio.load_object(alf_path, 'wheel')) if location == 'SDSC'
else alfio.load_object(alf_path, 'wheel'))
wheel = (fix_keys(alfio.load_object(alf_path, 'wheel')) if location == 'SDSC' else alfio.load_object(alf_path, 'wheel'))
self.wheel_timestamps = wheel.timestamps
# Compute interpolated wheel position and wheel times
wheel_pos, self.wheel_time = wh.interpolate_position(wheel.timestamps, wheel.position, freq=1000)
# Compute wheel velocity
self.wheel_vel, _ = wh.velocity_filtered(wheel_pos, 1000)
# Load in original camera times
self.camera_times = alfio.load_file_content(next(alf_path.glob(f'_ibl_{self.label}Camera.times*.npy')))
# Find raw video file and load in the metadata
self.camera_path = str(next(self.session_path.joinpath('raw_video_data').glob(
f'_iblrig_{self.label}Camera.raw*.mp4')))
self.camera_path = str(next(self.session_path.joinpath('raw_video_data').glob(f'_iblrig_{self.label}Camera.raw*.mp4')))
self.camera_meta = vidio.get_video_meta(self.camera_path)

# TODO should read in the description file to get the correct sync location
@@ -597,8 +576,7 @@ def compute_motion_energy(self, first, last, wg, iw):
while np.any(idx == frames.shape[0] - 1) and counter < 20 and iw != wg.nwin - 1:
n_after_offset = (counter + 1) * n_frames
last += n_frames
extra_frames = vidio.get_video_frames_preload(cap, frame_numbers=np.arange(last, last + n_frames),
mask=self.mask)
extra_frames = vidio.get_video_frames_preload(cap, frame_numbers=np.arange(last, last + n_frames), mask=self.mask)
frames = np.concatenate([frames, extra_frames], axis=0)
idx = self.find_contaminated_frames(frames, self.threshold)
after_status = True
@@ -837,8 +815,7 @@ def single_cluster_raster(spike_times, events, trial_idx, dividers, colors, labe

dividers = [0] + dividers + [len(trial_idx)]
if axs is None:
fig, axs = plt.subplots(2, 1, figsize=(4, 6), gridspec_kw={'height_ratios': [1, 3], 'hspace': 0},
sharex=True)
fig, axs = plt.subplots(2, 1, figsize=(4, 6), gridspec_kw={'height_ratios': [1, 3], 'hspace': 0}, sharex=True)
else:
fig = axs[0].get_figure()

@@ -857,8 +834,7 @@ def single_cluster_raster(spike_times, events, trial_idx, dividers, colors, labe
psth_div = np.nanmean(psth[t_ids], axis=0)
std_div = np.nanstd(psth[t_ids], axis=0) / np.sqrt(len(t_ids))

axs[0].fill_between(t_psth, psth_div - std_div,
psth_div + std_div, alpha=0.4, color=colors[lid])
axs[0].fill_between(t_psth, psth_div - std_div, psth_div + std_div, alpha=0.4, color=colors[lid])
axs[0].plot(t_psth, psth_div, alpha=1, color=colors[lid])

lab_max = idx[np.argmax(t_ints)]
@@ -876,8 +852,7 @@ def single_cluster_raster(spike_times, events, trial_idx, dividers, colors, labe
secax = axs[1].secondary_yaxis('right')

secax.set_yticks(label_pos)
secax.set_yticklabels(label, rotation=90,
rotation_mode='anchor', ha='center')
secax.set_yticklabels(label, rotation=90, rotation_mode='anchor', ha='center')
for ic, c in enumerate(np.array(colors)[lidx]):
secax.get_yticklabels()[ic].set_color(c)

@@ -932,8 +907,7 @@ def plot_with_behavior(self):
ax02.set_ylabel('Frames')
ax02.set_xlabel('Time in session')

ax03.plot(self.camera_times, (self.camera_times - self.new_times) * self.camera_meta['fps'],
'k', label='extracted - new')
ax03.plot(self.camera_times, (self.camera_times - self.new_times) * self.camera_meta['fps'], 'k', label='extracted - new')
ax03.legend()
ax03.set_ylim(-5, 5)
ax03.set_ylabel('Frames')
@@ -946,8 +920,8 @@ def plot_with_behavior(self):
ax11.set_title('Wheel')
ax12.set_xlabel('Time from first move')

self.single_cluster_raster(self.camera_times, self.trials['firstMovement_times'].values, trial_idx, dividers,
['g', 'y'], ['left', 'right'], weights=feature_ext, fr=False, axs=[ax21, ax22])
self.single_cluster_raster(self.camera_times, self.trials['firstMovement_times'].values, trial_idx, dividers, ['g', 'y'],
['left', 'right'], weights=feature_ext, fr=False, axs=[ax21, ax22])
ax21.sharex(ax22)
ax21.set_ylabel('Paw r velocity')
ax21.set_title('Extracted times')
@@ -962,8 +936,7 @@ def plot_with_behavior(self):

ax41.imshow(self.frame_example[0])
rect = matplotlib.patches.Rectangle((self.roi[1][1], self.roi[0][0]), self.roi[1][0] - self.roi[1][1],
self.roi[0][1] - self.roi[0][0],
linewidth=4, edgecolor='g', facecolor='none')
self.roi[0][1] - self.roi[0][0], linewidth=4, edgecolor='g', facecolor='none')
ax41.add_patch(rect)

ax42.plot(self.all_me)
@@ -1003,17 +976,15 @@ def plot_without_behavior(self):
ax02.set_ylabel('Frames')
ax02.set_xlabel('Time in session')

ax03.plot(self.camera_times, (self.camera_times - self.new_times) * self.camera_meta['fps'],
'k', label='extracted - new')
ax03.plot(self.camera_times, (self.camera_times - self.new_times) * self.camera_meta['fps'], 'k', label='extracted - new')
ax03.legend()
ax03.set_ylim(-5, 5)
ax03.set_ylabel('Frames')
ax03.set_xlabel('Time in session')

ax04.imshow(self.frame_example[0])
rect = matplotlib.patches.Rectangle((self.roi[1][1], self.roi[0][0]), self.roi[1][0] - self.roi[1][1],
self.roi[0][1] - self.roi[0][0],
linewidth=4, edgecolor='g', facecolor='none')
self.roi[0][1] - self.roi[0][0], linewidth=4, edgecolor='g', facecolor='none')
ax04.add_patch(rect)

ax05.plot(self.all_me)
@@ -1038,8 +1009,8 @@ def process(self):

# Compute the motion energy of the wheel for the whole video
wg = WindowGenerator(self.camera_meta['length'], 5000, 4)
out = Parallel(n_jobs=self.nprocess)(delayed(self.compute_motion_energy)(first, last, wg, iw)
for iw, (first, last) in enumerate(wg.firstlast))
out = Parallel(n_jobs=self.nprocess)(
delayed(self.compute_motion_energy)(first, last, wg, iw) for iw, (first, last) in enumerate(wg.firstlast))
# Concatenate the motion energy into one big array
self.all_me = np.array([])
for vals in out[:-1]:
@@ -1050,8 +1021,7 @@ def process(self):
to_app = self.times[0] - ((np.arange(int(self.camera_meta['fps'] * toverlap), ) + 1) / self.frate)[::-1]
times = np.r_[to_app, self.times]

wg = WindowGenerator(all_me.size - 1, int(self.camera_meta['fps'] * self.twin),
int(self.camera_meta['fps'] * toverlap))
wg = WindowGenerator(all_me.size - 1, int(self.camera_meta['fps'] * self.twin), int(self.camera_meta['fps'] * toverlap))

out = Parallel(n_jobs=1)(delayed(self.compute_shifts)(times, all_me, first, last, iw, wg)
for iw, (first, last) in enumerate(wg.firstlast))
@@ -1075,8 +1045,7 @@ def process(self):

if self.upload:
fig = self.plot_with_behavior() if self.behavior else self.plot_without_behavior()
save_fig_path = Path(self.session_path.joinpath('snapshot', 'video',
f'video_wheel_alignment_{self.label}.png'))
save_fig_path = Path(self.session_path.joinpath('snapshot', 'video', f'video_wheel_alignment_{self.label}.png'))
save_fig_path.parent.mkdir(exist_ok=True, parents=True)
fig.savefig(save_fig_path)
snp = ReportSnapshot(self.session_path, self.eid, content_type='session', one=self.one)
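
As an aside (not part of the commit), the overlapping-window parallelism used in MotionAlignmentFullSession.process follows the pattern sketched below. WindowGenerator's import path and the per-window function are assumptions; the constructor arguments and the firstlast/nwin attributes mirror their use in the diff.

import numpy as np
from joblib import Parallel, delayed
from neurodsp.utils import WindowGenerator  # import path is an assumption

signal = np.random.rand(100_000)            # stand-in for the per-frame motion energy trace
wg = WindowGenerator(signal.size, 5000, 4)  # windows of 5000 samples with an overlap of 4

def window_stat(x, first, last):
    # stand-in for compute_motion_energy / compute_shifts applied to a single window
    return x[first:last].std()

out = Parallel(n_jobs=4)(delayed(window_stat)(signal, first, last) for first, last in wg.firstlast)
assert len(out) == wg.nwin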
17 changes: 8 additions & 9 deletions ibllib/oneibl/data_handlers.py
@@ -131,7 +131,8 @@ def __init__(self, session_path, signatures, one=None):

# For cortex lab we need to get the endpoint from the ibl alyx
if self.lab == 'cortexlab':
self.globus.add_endpoint(f'flatiron_{self.lab}', alyx=ONE(base_url='https://alyx.internationalbrainlab.org').alyx)
alyx = AlyxClient(base_url='https://alyx.internationalbrainlab.org', cache_rest=None)
self.globus.add_endpoint(f'flatiron_{self.lab}', alyx=alyx)
else:
self.globus.add_endpoint(f'flatiron_{self.lab}', alyx=self.one.alyx)
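
The hunk above replaces a throwaway ONE instance with a bare AlyxClient (REST caching disabled) when resolving the cortexlab Flatiron endpoint. A minimal sketch of that pattern; the AlyxClient import path comes from the ONE-api package, and the add_endpoint call is only indicative.

from one.webclient import AlyxClient

# Talk to the IBL Alyx database directly, without building a full ONE instance or a REST cache
alyx = AlyxClient(base_url='https://alyx.internationalbrainlab.org', cache_rest=None)
# globus.add_endpoint(f'flatiron_{lab}', alyx=alyx)  # as done in __init__ above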

@@ -140,21 +141,19 @@ def __init__(self, session_path, signatures, one=None):
def setUp(self):
"""Function to download necessary data to run tasks using globus-sdk."""
if self.lab == 'cortexlab':
one = ONE(base_url='https://alyx.internationalbrainlab.org')
df = super().getData(one=one)
df = super().getData(one=ONE(base_url='https://alyx.internationalbrainlab.org'))
else:
one = self.one
df = super().getData()
df = super().getData(one=self.one)

if len(df) == 0:
# If no datasets found in the cache only work off local file system do not attempt to download any missing data
# using globus
# If no datasets found in the cache only work off local file system do not attempt to
# download any missing data using Globus
return

# Check for space on local server. If less that 500 GB don't download new data
space_free = shutil.disk_usage(self.globus.endpoints['local']['root_path'])[2]
if space_free < 500e9:
_logger.warning('Space left on server is < 500GB, wont redownload new data')
_logger.warning('Space left on server is < 500GB, won\'t re-download new data')
return

rel_sess_path = '/'.join(df.iloc[0]['session_path'].split('/')[-3:])
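
For reference (not part of the commit), the free-space guard in setUp reduces to the check below; the root path is an assumption, as the handler reads it from its local Globus endpoint configuration.

import shutil

root_path = '/mnt/s0/Data'  # assumed local server root
space_free = shutil.disk_usage(root_path).free  # equivalent to shutil.disk_usage(...)[2]
if space_free < 500e9:
    print("Space left on server is < 500GB, won't re-download new data")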
@@ -190,7 +189,7 @@ def uploadData(self, outputs, version, **kwargs):
return register_dataset(outputs, one=self.one, versions=versions, repository=data_repo, **kwargs)

def cleanUp(self):
"""Clean up, remove the files that were downloaded from globus once task has completed."""
"""Clean up, remove the files that were downloaded from Globus once task has completed."""
for file in self.local_paths:
os.unlink(file)

