Skip to content

Commit

Permalink
Merge pull request #294 from AllenInstitute/fix/running
Browse files Browse the repository at this point in the history
Workaround for missing intervalsms entries in the running-speed data
  • Loading branch information
neuromusic authored Jul 20, 2018
2 parents fbadcc9 + aafb611 commit 8f9d28d
Show file tree
Hide file tree
Showing 10 changed files with 129 additions and 9 deletions.
2 changes: 1 addition & 1 deletion setup.cfg
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
[bumpversion]
current_version = 0.4.2
current_version = 0.4.3
commit = True
tag = False
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(\.(?P<release>[a-z]+)(?P<n>\d+))?
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@

setuptools.setup(
name="visual-behavior",
version="0.4.2",
version="0.4.3",
author="Justin Kiggins",
author_email="[email protected]",
description="analysis package for visual behavior",
Expand Down
42 changes: 42 additions & 0 deletions tests/validation/test_core.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,48 @@
import pandas as pd
from visual_behavior.validation.core import *

def test_parse_log():
    """parse_log should exactly invert the DoubleColonFormatter layout."""
    fields = dict(
        levelname="ERROR",
        name="package.module",
        message="This is the error",
    )
    # Build a record in the LEVEL::name::message convention, then check
    # that parsing recovers every original field.
    record = "{levelname}::{name}::{message}".format(**fields)
    assert parse_log(record) == fields

def test_count_read_errors():
    """count_read_errors should group log records by their level name."""
    # An empty log must not report any 'ERROR' entries.
    counts = count_read_errors({'log': []})
    print(counts)
    assert 'ERROR' not in counts

    # A single INFO record: counted exactly once, and still no 'ERROR' key.
    counts = count_read_errors(
        {'log': ["INFO::package.module::informative message"]}
    )
    print(counts)
    assert 'ERROR' not in counts
    assert counts['INFO'] == 1

def test_validate_no_read_errors():
    """validate_no_read_errors flags sessions whose log contains ERRORs."""
    error_session = {'log': ["ERROR::package.module::error message"]}
    clean_session = {'log': ["INFO::package.module::informative message"]}

    assert validate_no_read_errors(error_session) == False
    assert validate_no_read_errors(clean_session) == True


def test_validate_running_data():
# good data: length matches time and not all values the same
Expand Down
2 changes: 1 addition & 1 deletion visual_behavior/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
__version__ = "0.4.2"
__version__ = "0.4.3"
21 changes: 19 additions & 2 deletions visual_behavior/translator/foraging/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,11 @@
import numpy as np
from scipy.signal import medfilt
from .extract import get_end_time
from ...utilities import calc_deriv, rad_to_dist, local_time
from ...utilities import calc_deriv, rad_to_dist, local_time, ListHandler, DoubleColonFormatter

import logging

logger = logging.getLogger(__name__)

warnings.warn(
"support for the loading stimulus_code outputs will be deprecated in a future version",
Expand All @@ -29,10 +33,20 @@ def data_to_change_detection_core(data, time=None):
- currently doesn't require or check that the `task` field in the
experiment data is "DoC" (Detection of Change)
"""

log_messages = []
handler = ListHandler(log_messages)
handler.setFormatter(
DoubleColonFormatter
)
logger.addHandler(
handler
)

if time is None:
time = load_time(data)

return {
core_data = {
"time": time,
"metadata": load_metadata(data),
"licks": load_licks(data, time=time),
Expand All @@ -42,6 +56,9 @@ def data_to_change_detection_core(data, time=None):
"visual_stimuli": load_visual_stimuli(data, time=time),
}

core_data['log'] = log_messages
return core_data


def load_metadata(data):
fields = (
Expand Down
23 changes: 21 additions & 2 deletions visual_behavior/translator/foraging2/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import pandas as pd
from ...utilities import local_time
from ...utilities import local_time, ListHandler, DoubleColonFormatter

from ...devices import get_rig_id
from .extract import get_trial_log, get_stimuli, get_pre_change_time, \
Expand All @@ -18,6 +18,10 @@

from .extract_stimuli import get_visual_stimuli

import logging

logger = logging.getLogger(__name__)


def data_to_change_detection_core(data):
"""Core data structure to be used across all analysis code?
Expand All @@ -37,7 +41,18 @@ def data_to_change_detection_core(data):
- currently doesn't require or check that the `task` field in the
experiment data is "DoC" (Detection of Change)
"""
return {

log_messages = []
handler = ListHandler(log_messages)
handler.setFormatter(
DoubleColonFormatter
)

logger.addHandler(
handler
)

core_data = {
"metadata": data_to_metadata(data),
"time": data_to_time(data),
"licks": data_to_licks(data),
Expand All @@ -47,6 +62,10 @@ def data_to_change_detection_core(data):
"visual_stimuli": data_to_visual_stimuli(data),
}

core_data['log'] = log_messages

return core_data


def expand_dict(out_dict, from_dict, index):
"""there is obviously a better way...
Expand Down
5 changes: 3 additions & 2 deletions visual_behavior/translator/foraging2/extract.py
Original file line number Diff line number Diff line change
Expand Up @@ -709,8 +709,9 @@ def get_running_speed(exp_data, smooth=False, time=None):
dx = medfilt(dx, kernel_size=5) # remove big, single frame spikes in encoder values
dx = np.cumsum(dx) # wheel rotations

if len(time) != len(dx):
raise ValueError("dx and time must be the same length")
if len(time) < len(dx):
logger.error('intervalsms record appears to be missing entries')
dx = dx[:len(time)]

speed = calc_deriv(dx, time)
speed = rad_to_dist(speed)
Expand Down
17 changes: 17 additions & 0 deletions visual_behavior/utilities.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from __future__ import print_function
from dateutil import tz
import logging
import numpy as np
import pandas as pd
from scipy.stats import norm
Expand Down Expand Up @@ -159,3 +160,19 @@ def local_time(iso_timestamp, timezone=None):
if not datetime.tzinfo:
datetime = datetime.replace(tzinfo=tz.gettz('America/Los_Angeles'))
return datetime.isoformat()


class ListHandler(logging.Handler):
    """Logging handler that collects formatted records into a plain list.

    Every record emitted through this handler is run through the handler's
    formatter and the resulting string is appended to the list supplied at
    construction, so callers can inspect log output after the fact.
    """

    def __init__(self, log_list):
        super(ListHandler, self).__init__()
        # Shared with the caller; formatted entries accumulate here.
        self.log_list = log_list

    def emit(self, record):
        # Format the record and stash the resulting string in the list.
        self.log_list.append(self.format(record))


# Shared Formatter *instance* (not a class, despite the CamelCase name) that
# renders records as "LEVEL::logger.name::message", so entries can later be
# split back into their parts on the "::" separator.
DoubleColonFormatter = logging.Formatter(
    "%(levelname)s::%(name)s::%(message)s",
)
23 changes: 23 additions & 0 deletions visual_behavior/validation/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,29 @@
from .extended_trials import get_first_lick_relative_to_scheduled_change


def parse_log(log_record):
    """Split a "LEVEL::name::message" log entry into its parts.

    Parameters
    ----------
    log_record : str
        A log entry formatted with the double-colon convention, i.e.
        "{levelname}::{name}::{message}".

    Returns
    -------
    dict
        With keys 'levelname', 'name', and 'message'.
    """
    # Split at most twice so a message that itself contains "::" does not
    # raise ValueError on unpacking; everything after the second separator
    # is kept intact as the message.
    levelname, name, message = log_record.split('::', 2)
    return dict(
        levelname=levelname,
        name=name,
        message=message,
    )


def count_read_errors(core_data):
    """Count the log entries in ``core_data['log']`` by severity level.

    Parameters
    ----------
    core_data : dict
        Core data structure containing a 'log' key: a list of
        "LEVEL::name::message" strings.

    Returns
    -------
    dict
        Mapping of levelname (e.g. 'INFO', 'ERROR') to number of
        occurrences. Levels with no entries are absent from the dict,
        matching the previous groupby-based behavior.
    """
    # collections.Counter is sufficient here; building a pandas DataFrame
    # just to group-count a single column is needless overhead.
    from collections import Counter

    levels = Counter(
        parse_log(log_record)['levelname']
        for log_record in core_data['log']
    )
    return dict(levels)


def validate_no_read_errors(core_data):
    """Return True when the session log holds no ERROR or CRITICAL entries."""
    counts = count_read_errors(core_data)

    # Sum the counts of the two severities that indicate a failed read.
    total_bad = sum(counts.get(level, 0) for level in ('ERROR', 'CRITICAL'))

    return total_bad == 0


def validate_running_data(core_data):
'''
for each sampling frame, the value of the encoder should be known
Expand Down
1 change: 1 addition & 0 deletions visual_behavior/validation/qc.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,7 @@ def define_validation_functions(core_data):
cd.validate_licks: (core_data,), # this one doesn't take trials
cd.validate_minimal_dropped_frames: (core_data,), # this one doesn't take trials
# f2.validate_frame_intervals_exists:(data), # this one doesn't take trials
cd.validate_no_read_errors: (core_data,),
}

return validation_functions
Expand Down

0 comments on commit 8f9d28d

Please sign in to comment.