Merge pull request #439 from Hjorthmedh/neuromod_redux2
Neuromod redux2
Hjorthmedh authored Dec 12, 2024
2 parents f0085d8 + 00b10ad commit d67cb2e
Showing 21 changed files with 1,254 additions and 262 deletions.
40 changes: 40 additions & 0 deletions codemeta.json
@@ -0,0 +1,40 @@
{
"@context": "https://doi.org/10.5063/schema/codemeta-2.0",
"type": "SoftwareSourceCode",
"applicationCategory": "Neuroscience",
"author": [
{
"id": "https://orcid.org/0000-0002-9302-0750",
"type": "Person",
"affiliation": {
"type": "Organization",
"name": "Royal Institute of Technology (KTH)"
},
"email": "[email protected]",
"familyName": "Hjorth",
"givenName": "Johannes"
},
{
"id": "_:author_2",
"type": "Person",
"email": "[email protected]",
"familyName": "Thunberg",
"givenName": "Wilhelm"
}
],
"codeRepository": "https://github.com/hjorthmedh/Snudda",
"dateCreated": "2024-12-06",
"dateModified": "2024-09-30",
"datePublished": "2021-07-01",
"description": "Snudda creates the connectivity for realistic networks of simulated neurons in silico in a bottom up fashion that can then be simulated using the NEURON software. Neurons are placed within 3D meshes representing the structures of interest, with neural densities as seen in experiments. Based on reconstructed morphologies and neuron placement we can infer locations of putative synapses based on proximity between axon and dendrites. Projections between different structures can be added either using axon reconstructions, or by defining a connectivity map between regions. Putative synapses are pruned to match experimental pair-wise data on connectivity. Networks can be simulated either on desktop machines, or on super computers.",
"downloadUrl": "https://github.com/Hjorthmedh/Snudda.git",
"funder": {
"type": "Organization",
"name": "KTH, HBP, EBRAINS 2.0, VBT"
},
"name": "Snudda",
"programmingLanguage": "Python",
"softwareRequirements": "Python 3.8+",
"version": "2.1.3",
"referencePublication": "https://doi.org/10.1007/s12021-021-09531-w"
}
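The description above outlines a staged pipeline: place neurons inside 3D meshes, detect putative synapses from axon-dendrite proximity, prune them against pairwise connectivity data, and simulate with NEURON. As a rough orientation, here is a minimal sketch of driving the touch-detection stage from Python. Only the import and the constructor keywords are taken verbatim from the detect.py diff below; the file paths are hypothetical and detect() is an assumed entry point, not confirmed by this commit.

# Hedged sketch: run Snudda's touch-detection stage in serial.
from snudda import SnuddaDetect  # import spelling introduced by this commit

sd = SnuddaDetect(config_file="networks/example/network-config.json",
                  position_file="networks/example/network-neuron-positions.hdf5",
                  save_file="networks/example/voxels/network-putative-synapses.hdf5",
                  role="master")
sd.detect()  # assumed method name; writes putative synapses to save_file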
@@ -293,7 +293,12 @@
"for data_type in all_species_data:\n",
" fig.add_trace(go.Scatter(x=time, y=all_species_data[data_type][0][0].T[0], name=data_type))\n",
"\n",
"fig.update_layout(xaxis_title=\"Time (s)\", yaxis_title=\"Concentration\", width=1000, height=800)\n",
"fig.update_layout(xaxis_title=\"Time (s)\", yaxis_title=\"Concentration\", width=1000, height=800,\n",
" font={\"size\":18}, # General font size for all elements\n",
" legend={\"font\":{\"size\":16}}, # Specific font size for legend\n",
" xaxis={\"title\":{\"font\":{\"size\":20}}, \"tickfont\":{\"size\":14}}, # X-axis title and tick labels\n",
" yaxis={\"title\":{\"font\":{\"size\":20}}, \"tickfont\":{\"size\":14}}) # Y-axis title and tick labels\n",
"\n",
"fig.show()"
]
},
250 changes: 158 additions & 92 deletions examples/notebooks/neuromodulation/neuromodulation_bath_current.ipynb

Large diffs are not rendered by default.

731 changes: 731 additions & 0 deletions examples/notebooks/optimise_prune/OLD2/OptimisePrune-ChIN-NGF.ipynb

Large diffs are not rendered by default.

255 changes: 113 additions & 142 deletions examples/notebooks/optimise_prune/OptimisePrune.ipynb

Large diffs are not rendered by default.

27 changes: 26 additions & 1 deletion snudda/detect/detect.py
@@ -2611,8 +2611,10 @@ def setup_parallel(self, d_view=None):
self.write_log("Workers already initialised.")
return

self.write_log(f"setup_parallel: {d_view = }")

with d_view.sync_imports():
from snudda.detect.detect import SnuddaDetect
from snudda import SnuddaDetect

self.write_log(f"Setting up workers: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())}")

Expand Down Expand Up @@ -2643,6 +2645,29 @@ def setup_parallel(self, d_view=None):
"snudda_data=snudda_data,"
"hyper_voxel_size=hyper_voxel_size,verbose=verbose,logfile_name=logfile_name[0],"
"save_file=save_file,slurm_id=slurm_id,role='worker', random_seed=random_seed)")

cmd_str = """
try:
sd = SnuddaDetect(config_file=config_file, position_file=position_file,voxel_size=voxel_size,
snudda_data=snudda_data,
hyper_voxel_size=hyper_voxel_size,verbose=verbose,logfile_name=logfile_name[0],
save_file=save_file,slurm_id=slurm_id,role='worker', random_seed=random_seed)
except Exception as e:
import os
import datetime
import traceback
engine_id = os.getpid()
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
log_file_name = f"worker_error_{engine_id}_{timestamp}.log"
with open(log_file_name, "w") as log_file:
log_file.write(f"Error initializing SnuddaDetect:\\n{traceback.format_exc()}\\n")
log_file.write(f"Engine ID: {engine_id}\\n")
log_file.write(f"Timestamp: {timestamp}\\n")
log_file.write(f"Parameters: config_file={config_file}, position_file={position_file}, voxel_size={voxel_size}\\n")
raise # Rethrow exception
"""

d_view.execute(cmd_str, block=True)

self.write_log(f"Workers setup: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())}")
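For context, d_view here is an ipyparallel DirectView: sync_imports() repeats an import on every engine, and execute() runs a code string remotely. The added try/except writes one log file per engine before re-raising, so initialisation failures leave a trace even when engine output is lost. A stripped-down, self-contained sketch of the same idiom, assuming a running ipyparallel cluster (the failing statement is only there to provoke the logging path):

import ipyparallel as ipp

rc = ipp.Client()           # connect to an already-running cluster
d_view = rc.direct_view()

cmd_str = """
try:
    result = 1 / divisor    # 'divisor' is pushed from the client below
except Exception:
    import os, datetime, traceback
    name = f"worker_error_{os.getpid()}_{datetime.datetime.now():%Y%m%d_%H%M%S}.log"
    with open(name, "w") as log_file:
        log_file.write(traceback.format_exc())
    raise                   # re-raise so the client still sees the failure
"""

d_view.push({"divisor": 0}, block=True)  # zero forces an error on every engine
d_view.execute(cmd_str, block=True)      # raises CompositeError; logs stay on disk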
15 changes: 11 additions & 4 deletions snudda/input/input_tuning.py
@@ -395,18 +395,22 @@ def find_signal_strength(self, requested_frequency=10.0, skip_time=0.0, show_plo

# Check if spike frequency is too low, if so give image a different name

move_bad = False

if depol_block:
label = f"signal-{requested_frequency}-Hz-BLOCKED"
move_bad = True
else:
label = f"signal-{requested_frequency}-Hz"

if np.max(spike_count_mean) < requested_spikes:
label = f"{label}-TOO-LOW-FREQ"
move_bad = True

self.plot_signal_info(neuron_id=neuron_id, neuron_info=neuron_info, best_config=best_config,
spike_count=spike_count, input_config=input_config, max_time=np.max(time),
requested_frequency=requested_frequency, depol_block_flag=depol_block_flag,
label=label, show_plot=show_plot)
label=label, show_plot=show_plot, move_bad=move_bad)

for name in bad_neuron_list:
print(f"Found early depolarisation block: {name}")
@@ -572,15 +576,18 @@ def plot_background_info(self, neuron_id, neuron_info, best_neuron_id, spike_cou
def plot_signal_info(self, neuron_id, neuron_info, best_config, spike_count, input_config,
max_time, requested_frequency, skip_time=0,
label="background-inputs", show_plot=True,
depol_block_flag=None):
depol_block_flag=None, move_bad=False):

import matplotlib.pyplot as plt

n_inputs_total = np.zeros((len(neuron_id),), dtype=int)
fig_dir = os.path.join(self.network_path, "figures")

if move_bad:
fig_dir = os.path.join(fig_dir, "_BAD")

if not os.path.isdir(fig_dir):
os.mkdir(fig_dir)
os.makedirs(fig_dir)

fig_name = os.path.join(fig_dir, f"{neuron_info['morphology_key']}-{neuron_info['parameter_key']}-{neuron_info['name']}-{label}.png")

@@ -894,7 +901,7 @@ def plot_depolarisation_blocked_neurons(self, freq_bin=10):
f"{full_morph_key}-{full_param_key}-{neuron_type}-BAD-trace.png")

if not os.path.exists(os.path.dirname(fig_path)):
os.mkdir(os.path.dirname(fig_path))
os.makedirs(os.path.dirname(fig_path))

plt.savefig(fig_path, dpi=300)
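Both directory-creation calls above move from os.mkdir to os.makedirs because the target may now be a nested path such as figures/_BAD, and mkdir cannot create intermediate directories. A small illustration (paths hypothetical); passing exist_ok=True would also make the isdir guard unnecessary:

import os

fig_dir = os.path.join("network_path", "figures", "_BAD")  # hypothetical nested path

# os.mkdir(fig_dir) raises FileNotFoundError while "figures" does not exist yet;
# makedirs creates the whole chain, and exist_ok=True makes the call idempotent.
os.makedirs(fig_dir, exist_ok=True)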

4 changes: 4 additions & 0 deletions snudda/optimise/optimise_pruning.py
@@ -392,6 +392,10 @@ def optimize(self, pre_type, post_type, con_type,

self.log_file.close()

# Clear the OptimisePruning cache, so that other networks can be optimised in the same program
if "op" in vars(OptimisePruning):
del OptimisePruning.op

return res

@staticmethod
6 changes: 5 additions & 1 deletion snudda/place/place.py
@@ -949,7 +949,9 @@ def write_data(self, file_name=None):

neuron_path = [snudda_simplify_path(n.neuron_path, self.snudda_data).encode("ascii", "ignore") for n in self.neurons]
max_np_len = max([len(x) for x in neuron_path])
neuron_group.create_dataset("neuron_path", (len(neuron_path),), f"S{max_np_len}", neuron_path)
# neuron_group.create_dataset("neuron_path", (len(neuron_path),), f"S{max_np_len}", neuron_path)
neuron_group.create_dataset("neuron_path", (len(neuron_path),), data=neuron_path,
dtype=h5py.special_dtype(vlen=str))

virtual_neuron_list = np.array([n.virtual_neuron for n in self.neurons], dtype=bool)
virtual_neuron = neuron_group.create_dataset("virtual_neuron",
@@ -1128,6 +1130,8 @@

pos_file.close()

self.write_log("Write done.")

############################################################################

def define_population_units(self, config):
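The rewritten create_dataset call above stores neuron_path as variable-length strings instead of fixed-width S{max_np_len} byte strings, whose width is frozen at the longest path present when the file is written. A self-contained sketch contrasting the two layouts; the file name is hypothetical, and h5py.string_dtype() is the modern spelling of special_dtype(vlen=str):

import h5py

paths = ["neurons/dspn", "neurons/ispn/a-much-longer-path"]
encoded = [p.encode("ascii") for p in paths]

with h5py.File("example.hdf5", "w") as f:
    # Fixed-width byte strings: width frozen at the longest current entry.
    max_len = max(len(p) for p in encoded)
    f.create_dataset("paths_fixed", (len(encoded),), f"S{max_len}", encoded)

    # Variable-length strings: each element keeps its own length.
    f.create_dataset("paths_vlen", (len(paths),),
                     data=paths, dtype=h5py.special_dtype(vlen=str))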
10 changes: 10 additions & 0 deletions snudda/place/region_mesh_redux.py
@@ -153,6 +153,16 @@ def __init__(self, mesh_path: str, d_min: float, random_seed=None, rng=None,
putative_points = self.remove_close_neurons(putative_points)
putative_points = self.remove_outside(putative_points)

if putative_points.shape[0] < 0.05 * n_putative_points:
    print(f"Managed to create only {putative_points.shape[0]} putative points within the volume.\n"
          f" WARNING --> is the volume too small? You can create a new cube mesh using create_cube_mesh.py\n\n"
          f"Example of how to use create_cube_mesh.py:\n"
          f"from snudda.place.create_cube_mesh import create_cube_mesh\n"
          f"create_cube_mesh(file_name='your_cube_mesh_name.obj', \n"
          f"                 centre_point=(0,0,0), side_len=300e-6,\n"
          f"                 description='Adjust side_len to get correct neuron density')\n"
          )

self.putative_points = putative_points
self.allocated_points = np.zeros(shape=(putative_points.shape[0],), dtype=bool)
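The placement code above scatters putative points through the mesh, then discards points that violate the minimum neuron distance or fall outside the volume, warning when fewer than 5% survive. A generic sketch of the distance-rejection step using a KD-tree; this is not Snudda's implementation (which lives in remove_close_neurons), and rebuilding the tree per point keeps the sketch short at the cost of speed:

import numpy as np
from scipy.spatial import cKDTree

def remove_close_points(points, d_min, rng):
    """Greedy sketch: keep a point only if no already-kept point lies within d_min."""
    kept = []
    for p in rng.permutation(points):
        if not kept or cKDTree(np.array(kept)).query(p, k=1)[0] >= d_min:
            kept.append(p)
    return np.array(kept)

rng = np.random.default_rng(42)
pts = rng.uniform(0, 300e-6, size=(1000, 3))  # putative points in a 300 um cube
print(remove_close_points(pts, d_min=15e-6, rng=rng).shape)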

2 changes: 1 addition & 1 deletion snudda/simulate/simulate.py
@@ -225,7 +225,7 @@ def __init__(self,
if "snudda_data" in self.sim_info:
# Do not change this unless you know what you are doing
self.snudda_data = self.sim_info["snudda_data"]
self.write_log(f"Reading snudda_data {self.snudda_data}")
self.write_log(f"Reading snudda_data {self.snudda_data} from config file", force_print=True)

if "current_injection_file" in self.sim_info:
current_file = self.sim_info["current_injection_file"]
7 changes: 6 additions & 1 deletion snudda/synaptic_fitting/optimise_synapses_full.py
@@ -554,7 +554,12 @@ def setup_model(self,
if n_synapses_override is not None:
n_synapses = n_synapses_override
else:
n_synapses = c_prop["num_synapses"]
if "num_synapses" in c_prop:
n_synapses = c_prop["num_synapses"]
elif 'nSynapses' in c_prop:
n_synapses = c_prop['nSynapses']
else:
raise Exception('Setup error: number of synapses not no specified in setup file (which ever that is?)')

if "holding_current" in c_prop:
holding_current = c_prop["holding_current"]
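The added branch accepts both the snake_case num_synapses key and the older camelCase nSynapses spelling before giving up. If further legacy spellings turn up, the same fallback chain could be factored into a small helper; this is a generic sketch, not part of this commit:

def first_present(mapping, *keys):
    """Return the value for the first key present in mapping, else raise KeyError."""
    for key in keys:
        if key in mapping:
            return mapping[key]
    raise KeyError(f"None of {keys} found in setup data")

c_prop = {"nSynapses": 12}  # example setup entry using the old spelling
n_synapses = first_present(c_prop, "num_synapses", "nSynapses")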
8 changes: 4 additions & 4 deletions snudda/synaptic_fitting/run_little_synapse_run.py
@@ -160,7 +160,7 @@ def __init__(self,
# Remove VClamp
self.v_clamp = None

self.i_clamp = neuron.h.i_clamp(self.soma(0.5))
self.i_clamp = neuron.h.IClamp(self.soma(0.5))
self.i_clamp.amp = cur # nA
self.i_clamp.dur = 2 * self.time * 1e3

@@ -248,9 +248,9 @@ def run(self, tau, tau_r, tau_f, u, cond=None, time=None):
# Convert from SI units to natural units that Neuron uses
self.nc_syn.weight[0] = 1 * cond * 1e6
self.little_synapse.tau = tau * 1e3
self.little_synapse.tau_r = tau_r * 1e3
self.little_synapse.tau_f = tau_f * 1e3
self.little_synapse.u = u
self.little_synapse.tauR = tau_r * 1e3
self.little_synapse.tauF = tau_f * 1e3
self.little_synapse.U = u

# print(self.littleSynapse.tau)

89 changes: 83 additions & 6 deletions snudda/utils/sbtab_to_snudda.py
@@ -2,6 +2,8 @@
import numpy as np
import pandas as pd
import json
import quantities as pq


# See convert_sbtab_to_json.sh for an example

@@ -43,6 +45,12 @@ def __init__(self, sbtab_path, out_filename,
self.parameters_data = None
self.constants_data = None

self.default_concentration_unit = "molar"
self.unit_dict = dict()

self.unit_dict["nmol_unit"] = pq.UnitQuantity('nanomole', pq.nano * pq.mole, symbol='nmol')
self.unit_dict["nM_unit"] = pq.UnitQuantity('nanomolar', pq.nano * pq.molar, symbol='nM')

self.parameters = dict()

self.data = dict()
@@ -74,14 +82,14 @@ def _get_optional_value(self, row, value_name, default_value=0):

def parse(self):

import quantities as pq

self.data["species"] = dict()
self.data["rates"] = dict()
self.data["reactions"] = dict()

nmol_unit = pq.UnitQuantity('nanomole', pq.nano * pq.mole, symbol='nmol')
nM_unit = pq.UnitQuantity('nanomolar', pq.nano * pq.molar, symbol='nM')
original_sbtab_units = dict()

nmol_unit = self.unit_dict["nmol_unit"]
nM_unit = self.unit_dict["nM_unit"]

conc_unit = nM_unit

@@ -101,6 +109,9 @@
}

self.data["species"][species_name] = species_data
original_sbtab_units[species_name] = species_unit.symbol

self.get_parameters() # TODO, not finished yet!

for row_idx, row in self.reactions_data.iterrows():
reaction_name = row["!Name"]
@@ -114,9 +125,75 @@ def get_parameters(self):
parameter_name = row["!Name"]
parameter_value = row["!Value:linspace"]

# We assume that parameter names kf_R0 and kr_R0 belong to reaction R0.
reaction_name = row["!Name"].split("_")[-1]

reaction_row = self.reactions_data[self.reactions_data["!ID"] == reaction_name]

if len(reaction_row) != 1:
raise KeyError(f"Unable to find exactly one line for reaction {reaction_name} "
f"in {self.reactions_filename} (found {len(reaction_row)})")

kinetic_law = reaction_row["!KineticLaw"].iloc[0]  # .iloc avoids label-based lookup on the filtered row

reaction_components = None

for part_kinetic in kinetic_law.split("-"):
reaction_parts = part_kinetic.split("*")

# Hidden assumption: the rate constant appears before the compound terms
if parameter_name in reaction_parts[0]:
reaction_components = reaction_parts[1:]

if reaction_components is None:
    raise KeyError(f"Unable to find rate constant {parameter_name}; we assume it appears "
                   f"before the compounds in the !KineticLaw column of {self.reactions_filename}")

original_unit_str = ""
target_unit_str = ""

# Next we replace components with their units, and use the quantities library to calculate the conversion factor
for rc in reaction_components:
rc_name = rc.split("^")[0]

try:
compound_row = self.compounds_data[self.compounds_data["!Name"] == rc_name]
except:
import traceback
print(traceback.format_exc())
import pdb
pdb.set_trace()

if len(compound_row) != 1:
raise KeyError(f"The compound {rc_name} does not appear on a unique row in {self.compounds_filename}")

if original_unit_str != "":
original_unit_str += " * "
target_unit_str += " * "

import pdb
pdb.set_trace()

# TODO: 2024-12-06, verify unit scaling for k_f and k_r

original_unit_str += f"({compound_row['!Unit'].iloc[0]})"
target_unit_str += f"({self.default_concentration_unit})"

if "^" in rc:
original_unit_str += f"^{rc.split('^')[1]}"
target_unit_str += f"^{rc.split('^')[1]}"

nmol_unit = self.unit_dict["nmol_unit"]
nM_unit = self.unit_dict["nM_unit"]

original_unit = pq.CompoundUnit(original_unit_str)
target_unit = pq.CompoundUnit(target_unit_str)

scale = original_unit.rescale(target_unit)

# We need to convert to SI units...

scale = row["!Scale"]
# scale = row["!Scale"]

# We need to account for whether the value is on a log10 or some other scale, or use the linear value -- which is best?
# Extract K_forward and K_backwards (if present) and store them in self.parameters
@@ -131,7 +208,7 @@

# TODO: Continue here!!

self.parameters[parameter_name] = parameter_value
self.parameters[parameter_name] = parameter_value * scale

import pdb
pdb.set_trace()
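The unit handling above builds compound unit strings from the kinetic law and lets the quantities package compute the rescale factor between the SBtab units and the default concentration unit. A self-contained illustration of that factor computation, reusing the nM unit exactly as defined in __init__; the example rate constant unit is made up:

import quantities as pq

# Custom nanomolar unit, as registered in __init__ above.
nM_unit = pq.UnitQuantity('nanomolar', pq.nano * pq.molar, symbol='nM')

# A bimolecular rate constant expressed per (nM * s)...
original_unit = pq.CompoundUnit("1/(nM * s)")
# ...rescaled to per (M * s), molar being quantities' built-in symbol M:
target_unit = pq.CompoundUnit("1/(M * s)")

scale = original_unit.rescale(target_unit)
print(scale)  # 1.0 / (nM * s) equals 1e9 / (M * s)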