Commit

Prepare for data analysis, organize LNP as package

Tidy up function inputs so that there is no reference to variables
in the workspace. lnp.py is added to the Python PATH and is ready
to be used.

Delete unused empty file.
ycanerol committed May 22, 2017
1 parent 8930b11 commit 46e408c
Showing 5 changed files with 76 additions and 20 deletions.
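
With lnp.py organized as an importable module, the analysis functions can be
pulled into any script. A minimal sketch of the intended usage; the
'/path/to/repo' below is a placeholder, not a path specified by the commit:

import sys
sys.path.append('/path/to/repo')  # placeholder for the directory holding lnp.py

import lnp
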
8 changes: 5 additions & 3 deletions LNP_model → LNP_model.py
@@ -8,6 +8,7 @@
import numpy as np
from scipy.stats.mstats import mquantiles
from datetime import datetime
+import matplotlib

execution_timer = datetime.now()

@@ -23,7 +24,7 @@
def make_noise():  # Generate gaussian noise for stimulus
    return np.random.normal(0, 1, total_frames)

-#stimulus = make_noise()
+stimulus = make_noise()

filter_index1 = 1  # Change filter type here
filter_index2 = 3
@@ -82,7 +83,7 @@ def nlt(k, nlt_index):
print('{} spikes generated.'.format(int(sum(spikes))))


-def sta(spikes, stimulus, filter_length):
+def sta(spikes, stimulus, filter_length, total_frames):
    snippets = np.zeros(filter_length)
    for i in range(filter_length, total_frames):
        if spikes[i] != 0:
@@ -91,7 +92,8 @@ def sta(spikes, stimulus, filter_length):
    sta_unscaled = snippets/sum(spikes)  # Normalize/scale the STA
    sta_scaled = sta_unscaled/np.sqrt(sum(np.power(sta_unscaled, 2)))
    return sta_scaled, sta_unscaled
-recovered_kernel = sta(spikes, stimulus, filter_length)[0]  # Use scaled STA
+recovered_kernel = sta(spikes, stimulus, filter_length, total_frames)[0]
+# Use scaled STA

filtered_recovery = np.convolve(recovered_kernel, stimulus,
                                mode='full')[:-filter_length+1]
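
After this change sta() depends only on its arguments, so it can be exercised
on synthetic data with no workspace state. A sketch; the sizes and the Poisson
stand-in for the spike train are illustrative assumptions:

import numpy as np

total_frames = 10000
filter_length = 20

stimulus = np.random.normal(0, 1, total_frames)  # Gaussian white noise
spikes = np.random.poisson(0.5, total_frames)    # stand-in spike counts

# Everything sta() reads is now passed in explicitly
recovered = sta(spikes, stimulus, filter_length, total_frames)[0]
assert recovered.shape == (filter_length,)
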
Binary file added __pycache__/lnp.cpython-36.pyc
67 changes: 67 additions & 0 deletions lnp.py
@@ -0,0 +1,67 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 22 14:58:43 2017

@author: ycan

Functions for LNP model

Includes STA, STC to be used in data analysis
"""

import numpy as np
from scipy.stats.mstats import mquantiles


def sta(spikes, stimulus, filter_length, total_frames):
    snippets = np.zeros(filter_length)
    for i in range(filter_length, total_frames):
        if spikes[i] != 0:
            snippets = snippets+stimulus[i:i-filter_length:-1]*spikes[i]
            # Snippets are inverted before being added
    sta_unscaled = snippets/sum(spikes)  # Normalize/scale the STA
    sta_scaled = sta_unscaled/np.sqrt(sum(np.power(sta_unscaled, 2)))
    return sta_scaled, sta_unscaled  # Unscaled might be needed for STC


def log_nlt_recovery(spikes, filtered_recovery, bin_nr, dt):
    logbins = np.logspace(0, np.log(30)/np.log(10), bin_nr)
    logbins = -logbins[::-1]+logbins
    logbindices = np.digitize(filtered_recovery, logbins)
    spikecount_in_logbins = np.array([])
    for i in range(bin_nr):
        spikecount_in_logbins = np.append(spikecount_in_logbins,
                                          (np.average(spikes[np.where
                                           (logbindices == i)]))/dt)
    return logbins, spikecount_in_logbins


def q_nlt_recovery(spikes, filtered_recovery, bin_nr, dt):
    quantiles = mquantiles(filtered_recovery,
                           np.linspace(0, 1, bin_nr, endpoint=False))
    bindices = np.digitize(filtered_recovery, quantiles)
    # Returns which bin each value should go in
    spikecount_in_bins = np.array([])
    for i in range(bin_nr):  # Sorts values into bins
        spikecount_in_bins = np.append(spikecount_in_bins,
                                       (np.average(spikes[np.where
                                        (bindices == i)])/dt))
    return quantiles, spikecount_in_bins


def stc(spikes, stimulus, filter_length, total_frames, dt):
    covariance = np.zeros((filter_length, filter_length))
    sta_temp = sta(spikes, stimulus, filter_length, total_frames)[1]
    # Unscaled STA
    for i in range(filter_length, total_frames):
        if spikes[i] != 0:
            snippet = stimulus[i:i-filter_length:-1]
            # Snippets are inverted before being added
            snippet = snippet-np.dot(snippet, sta_temp)*sta_temp
            # Project out the STA from snippets
            snpta = np.array(snippet-sta_temp)[np.newaxis, :]
            covariance = covariance+np.dot(snpta.T, snpta)*spikes[i]
    return covariance/(sum(spikes)-1)
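
A sketch of using the new module end to end, recovering a known filter from
synthetic data; the exponential "true" filter, the toy nonlinearity, and the
bin width dt are illustrative assumptions, not part of the commit:

import numpy as np
import lnp

total_frames = 50000
filter_length = 20
dt = 0.01  # assumed bin width

kernel = np.exp(-np.arange(filter_length)/5.0)  # made-up "true" filter
kernel = kernel/np.sqrt(np.sum(kernel**2))      # unit norm, like sta_scaled

stimulus = np.random.normal(0, 1, total_frames)
generator = np.convolve(kernel, stimulus, mode='full')[:total_frames]
spikes = np.random.poisson(np.exp(generator)*dt)  # toy LNP spike generation

recovered = lnp.sta(spikes, stimulus, filter_length, total_frames)[0]
print(np.dot(recovered, kernel))  # approaches 1 when recovery works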

8 changes: 0 additions & 8 deletions noi.py

This file was deleted.

13 changes: 4 additions & 9 deletions stc.py
@@ -12,11 +12,9 @@

matplotlib.rcParams['figure.dpi'] = 100

-sta_temp = sta(spikes, stimulus, filter_length)[1]  # Unscaled STA
-
-
-def stc(spikes, stimulus, filter_length, sta_temp):
+def stc(spikes, stimulus, filter_length, total_frames):
    covariance = np.zeros((filter_length, filter_length))
+    sta_temp = sta(spikes, stimulus, filter_length, total_frames)[1]  # Unscaled STA
    for i in range(filter_length, total_frames):
        if spikes[i] != 0:
            snippet = stimulus[i:i-filter_length:-1]
@@ -28,7 +26,7 @@ def stc(spikes, stimulus, filter_length, sta_temp):
            covariance = covariance+np.dot(snpta.T, snpta)*spikes[i]
    return covariance/(sum(spikes)-1)

-recovered_stc = stc(spikes, stimulus, filter_length, sta_temp)
+recovered_stc = stc(spikes, stimulus, filter_length, total_frames)

# %%
w, v = np.linalg.eig(recovered_stc)
@@ -52,7 +50,4 @@ def stc(spikes, stimulus, filter_length, sta_temp):
#quantiles_stc1,spikecount_in_bins_stc1 = q_nlt_recovery(spikes, filtered_recovery,100)
logbins_stc2, spikecount_in_logbins_stc2 = log_nlt_recovery(spikes,
                                                            filtered_recovery_stc2,
-                                                            60, k)
-
-
-
+                                                            60, k)
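
Downstream of this commit, the eigendecomposition already computed in stc.py
(w, v = np.linalg.eig(recovered_stc)) yields the candidate STC filters; a
sketch of extracting them, where the sorting convention is an assumption:

order = np.argsort(np.real(w))              # covariance is symmetric, so eigenvalues are real
stc_filter_low = np.real(v[:, order[0]])    # most suppressive direction
stc_filter_high = np.real(v[:, order[-1]])  # most excitatory direction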
