diff --git a/basico/callbacks.py b/basico/callbacks.py
index 875f872..1627d9b 100644
--- a/basico/callbacks.py
+++ b/basico/callbacks.py
@@ -103,12 +103,12 @@ def is_available():
 
     def addItem(self, *args, **kwargs):
         self.count += 1
-        self.ptrs[self.count] = { 'type': args[1], 'value': args[2], 'end': args[3] }
+        self.ptrs[self.count] = {'type': args[1], 'value': args[2], 'end': args[3]}
         initial = self._get_current_value(self.ptrs[self.count])
         if np.isinf(initial):
-            initial = 0;
+            initial = 0
         total = self._get_end_value(self.ptrs[self.count])
-        self.handlers[self.count] = tqmd_lib.tqdm(desc=args[0], total=total, initial=initial, **self.args) 
+        self.handlers[self.count] = tqmd_lib.tqdm(desc=args[0], total=total, initial=initial, **self.args)
         return self.count
 
     def finishItem(self, handle):
@@ -131,7 +131,6 @@
     def finish(self):
         return self.proceed()
 
     def setName(self, name):
-        #print(name)
         return self.proceed()
 
diff --git a/basico/logging.py b/basico/logging.py
index 6b1b408..218c86f 100644
--- a/basico/logging.py
+++ b/basico/logging.py
@@ -4,7 +4,7 @@
 
 Logging convenience functions.
 
-Originally from the `pypesto` package, see 
+Originally from the `pypesto` package, see
 https://github.com/ICB-DCM/pyPESTO
 
 """
diff --git a/basico/model_info.py b/basico/model_info.py
index 0e43e47..f192090 100644
--- a/basico/model_info.py
+++ b/basico/model_info.py
@@ -774,7 +774,7 @@ def set_plot_curves(plot_spec, curves, **kwargs):
           | or `spectogram`)
         | 'channels': ['[X]', '[Y]'],  # display names of all the items to be plotted
         | 'color': 'auto', # color as hex rgb value (i.e '#ff0000' for red) or 'auto'
-        | 'line_type': 'lines', # the line type (one of `lines`, `points`, `symbols` or 
+        | 'line_type': 'lines', # the line type (one of `lines`, `points`, `symbols` or
           | `lines_and_symbols`)
         | 'line_subtype': 'solid', # line subtype (one of `solid`, `dotted`, `dashed`, `dot_dash` or
           | `dot_dot_dash`)
@@ -902,7 +902,7 @@ def add_default_plot(name, **kwargs):
 
     :param name: name of the default plot
     :type name: str
-    
+
     :param kwargs: optional arguments
 
@@ -947,10 +947,10 @@
 
 def get_default_plot_names(filter=None, **kwargs):
     """Returns a list of default plot names
-    
+
     :param filter: optional filter of substring to be in the name
-    :param kwargs: 
-    :return: 
+    :param kwargs:
+    :return:
     """
     ids = COPASI.COutputAssistant.getListOfDefaultOutputDescriptions()
     global _default_plots
@@ -1067,12 +1067,12 @@ def set_report_dict(spec, precision=None, separator=None, table=None,
                     print_headers=True, header=None, body=None, footer=None, task=None,
                     comment=None, add_separator=None, **kwargs):
     """Sets properties of the named report definition.
-    
+
     Examples:
-    
+
     The following would set a report definition 'Time Course' to include Time and the concentration
-    of S, in a report that is separated by tabs. 
-    
+    of S, in a report that is separated by tabs.
+
     >>> set_report_dict('Time Course', body=['Time', '[S]']
 
     The following defines a report for the Steady State concentration of S. To disambiguate, that the
@@ -1091,10 +1091,10 @@
    :type separator: Optional[str]
 
    :param table: a list of CNs or display names of elements to collect in a table. If `table` is specified
-       the header, body, footer argument will be ignored. Note that setting table elements is only 
+       the header, body, footer argument will be ignored. Note that setting table elements is only
       useful for tasks that generate output *during* the task. If that is not the case, you will have to
       specify the footer and header element directly.
-    
+
    :type table: [str]
 
    :param print_headers: optional arguments, indicating whether table headers will be printed (only applies
@@ -1115,7 +1115,7 @@
    :param comment: a documentation string for the report (can bei either string, or xhtml string)
    :type comment: Optional[str]
-    
+
    :param add_separator: an optional boolean flag, to automatically add seprators between header, body and
       footer entries since this is not necessary for table entries.
    :type add_separator: Optional[bool]
@@ -1312,7 +1312,7 @@ def _replace_cns_with_names(expression, **kwargs):
    words = _split_by_cn(expression)
    skip = -1
    for i in range(len(words)):
-        if i < skip: 
+        if i < skip:
            continue
        word = words[i]
 
@@ -1321,7 +1321,7 @@
            cn = word
            word = ''
        elif word.startswith(''):
                cn += ' ' + words[i]
@@ -1333,10 +1333,10 @@
        elif word.startswith('CN='):
            cn = word
            word = ''
-        else: 
+        else:
            cn = None
 
-        if cn is not None: 
+        if cn is not None:
            obj = dm.getObject(COPASI.CCommonName(cn))
            if obj is not None:
                word = obj.getObjectDisplayName()
@@ -1770,7 +1770,7 @@ def add_function(name, infix, type='general', mapping=None, **kwargs):
    if mapping is None:
        mapping = {}
-    
+
    if db.findLoadFunction(name) is not None:
        logger.error('A function with name "' + name + '" already exists')
        return
@@ -1784,11 +1784,11 @@
    fun.setReversible(__function_type_to_int(type))
 
    variables = fun.getVariables()
-    assert(isinstance(variables, COPASI.CFunctionParameters))
+    assert (isinstance(variables, COPASI.CFunctionParameters))
 
    for i in range(variables.size()):
        param = variables.getParameter(i)
-        assert(isinstance(param, COPASI.CFunctionParameter))
+        assert (isinstance(param, COPASI.CFunctionParameter))
        usage = __usage_to_int(mapping.get(param.getObjectName(), 'parameter'))
        param.setUsage(usage)
@@ -1811,10 +1811,10 @@
    root = COPASI.CRootContainer.getRoot()
    assert (isinstance(root, COPASI.CRootContainer))
    db = root.getFunctionList()
-    assert(isinstance(db, COPASI.CFunctionDB))
+    assert (isinstance(db, COPASI.CFunctionDB))
    fun = db.findFunction(name)
-    assert(isinstance(fun, COPASI.CFunction))
+    assert (isinstance(fun, COPASI.CFunction))
    if fun is None:
        logger.warning('A function with name "' + name + '" does not exists')
        return
@@ -1834,7 +1834,7 @@ def remove_user_defined_functions():
    root = COPASI.CRootContainer.getRoot()
    assert (isinstance(root, COPASI.CRootContainer))
    db = root.getFunctionList()
-    assert(isinstance(db, COPASI.CFunctionDB))
+    assert (isinstance(db, COPASI.CFunctionDB))
    funs = db.loadedFunctions()
 
    to_be_deleted = []
@@ -2188,7 +2188,7 @@ def get_compartments(name=None, exact=False, **kwargs):
    assert (isinstance(model, COPASI.CModel))
 
    compartments = model.getCompartments()
-    assert(isinstance(compartments, COPASI.CompartmentVectorNS))
+    assert (isinstance(compartments, COPASI.CompartmentVectorNS))
    num_compartments = compartments.size()
 
    data = []
@@ -2263,7 +2263,7 @@ def get_parameters(name=None, exact=False, **kwargs):
    assert (isinstance(model, COPASI.CModel))
 
    parameters = model.getModelValues()
-    assert(isinstance(parameters, COPASI.ModelValueVectorN))
+    assert (isinstance(parameters, COPASI.ModelValueVectorN))
    num_params = parameters.size()
 
    data = []
@@ -2349,7 +2349,7 @@ def get_functions(name=None, **kwargs):
        if suitable_for is None:
            logger.error('No reaction {0} found'.format(kwargs['suitable_for']))
            return None
-        assert(isinstance(suitable_for, COPASI.CReaction))
+        assert (isinstance(suitable_for, COPASI.CReaction))
        eqn = suitable_for.getChemEq()
        num_substrates = eqn.getSubstrates().size()
        num_products = eqn.getProducts().size()
@@ -2488,7 +2488,7 @@ def get_reaction_parameters(name=None, **kwargs):
            if fun_parameter.getUsage() != COPASI.CFunctionParameter.Role_PARAMETER:
                continue
            parameter = parameter_group.getParameter(fun_parameter.getObjectName())
-            if parameter is None: 
+            if parameter is None:
                continue
 
            current_param = param_objects[j][0] if param_objects[j] else None
@@ -2661,7 +2661,7 @@ def set_compartment(name=None, exact=False, **kwargs):
        if name and type(name) is str and exact and name != current_name:
            continue
-        
+
        if 'name' in kwargs and kwargs['name'] not in current_name:
            continue
 
@@ -2801,7 +2801,7 @@ def set_parameters(name=None, exact=False, **kwargs):
        if name and type(name) is str and exact and name != current_name and name != display_name:
            continue
-        
+
        if 'name' in kwargs and (kwargs['name'] not in current_name and kwargs['name'] != display_name):
            continue
 
@@ -2906,7 +2906,7 @@ def set_reaction_parameters(name=None, **kwargs):
            if fun_parameter.getUsage() != COPASI.CFunctionParameter.Role_PARAMETER:
                continue
            param = parameter_group.getParameter(fun_parameter.getObjectName())
-            if param is None: 
+            if param is None:
                continue
            current_param = param_objects[j][0] if param_objects[j] else None
            cn = current_param.getCN() if current_param else None
@@ -2972,9 +2972,9 @@ def set_reaction(name=None, exact=False, **kwargs):
    - | `scheme`: the reaction scheme, new species will be created automatically
    - | `function`: the function from the function database to set
-    
+
    - | `mapping`: an optional dictionary that maps model elements to the function
-      | parameters. (can be any volume, species, modelvalue or in case of 
+      | parameters. (can be any volume, species, modelvalue or in case of
      | local parameters a value)
 
    - | `notes`: sets notes for the reaction (either plain text, or valid xhtml)
@@ -2998,7 +2998,7 @@ def set_reaction(name=None, exact=False, **kwargs):
    for i in range(num_reactions):
        reaction = reactions.get(i)
-        assert(isinstance(reaction, COPASI.CReaction))
+        assert (isinstance(reaction, COPASI.CReaction))
 
        current_name = reaction.getObjectName()
 
@@ -3077,7 +3077,7 @@ def _set_reaction(reaction, dm, **kwargs):
                         'valid and cannot be applied. (missing mapping(s) for {2})'.format(
                             reaction.getObjectName(), kwargs['function'],
                             [entry['usage'] + ': ' + entry['name'] for entry in missing]))
-        
+
        info.writeBackToReaction(reaction)
        reaction.compile()
        changed = True
@@ -3761,7 +3761,7 @@ def set_element_name(element, new_name, **kwargs):
    if not element.setObjectName(new_name):
        logger.warning("couldn't change name of the element")
        return
-    
+
    dm = model_io.get_model_from_dict_or_default(kwargs)
    assert (isinstance(dm, COPASI.CDataModel))
 
@@ -3908,7 +3908,7 @@ def remove_amount_expressions(**kwargs):
 
    keys = []
    for mv in model.getModelValues():
-        assert(isinstance(mv, COPASI.CModelValue))
+        assert (isinstance(mv, COPASI.CModelValue))
        if mv.getNotes().startswith('Amount ex'):
            keys.append(mv.getKey())
 
@@ -4289,7 +4289,7 @@ def add_equation(eqn, time_symbol='t', **kwargs):
 
 
 def _annotated_matrix_to_df_1d(ann_matrix):
-    assert(isinstance(ann_matrix, COPASI.CDataArray))
+    assert (isinstance(ann_matrix, COPASI.CDataArray))
    dim = ann_matrix.dimensionality()
    if dim != 1:
        logger.error('only one dimensional matrices are supported by this method')
@@ -4314,8 +4314,8 @@ def _annotated_matrix_to_df(ann_matrix):
    :return: a pandas dataframe representing the matrix
    :rtype: pd.DataFrame
    """
-    assert(isinstance(ann_matrix, COPASI.CDataArray))
-    dim = ann_matrix.dimensionality() 
+    assert (isinstance(ann_matrix, COPASI.CDataArray))
+    dim = ann_matrix.dimensionality()
    if dim != 2:
        if dim == 1:
            return _annotated_matrix_to_df_1d(ann_matrix)
@@ -4697,7 +4697,7 @@ def _collect_data(names=None, cns=None, **kwargs):
                # couldn't find that object in the model
                logger.warning('No object for cn: {0}'.format(str(cn)))
                continue
-            assert(isinstance(obj, COPASI.CDataObject))
+            assert (isinstance(obj, COPASI.CDataObject))
            value = _get_value_from_reference(obj)
            data.append({'name': obj.getObjectDisplayName(), 'value': value})
 
@@ -4887,7 +4887,7 @@ def _set_named_value(obj, name, new_value, ref):
        model.updateInitialValues(ref)
 
 
-def set_value(name_or_reference, new_value, initial=False, **kwargs): 
+def set_value(name_or_reference, new_value, initial=False, **kwargs):
    """Gets the value of the named element or nones
 
    :param name_or_reference: display name of model element
@@ -5069,8 +5069,8 @@ def assign_report(name, task, filename='', append=True, confirm_overwrite=True,
    if not report_definition:
        logger.error('No report definition: {0}'.format(name))
 
-    assert(isinstance(report_definition, COPASI.CReportDefinition))
-    assert(isinstance(task, COPASI.CCopasiTask))
+    assert (isinstance(report_definition, COPASI.CReportDefinition))
+    assert (isinstance(task, COPASI.CCopasiTask))
 
    r = task.getReport()
    assert (isinstance(r, COPASI.CReport))
@@ -5147,7 +5147,7 @@ def set_scheduled_tasks(task_name, **kwargs):
        task_name = [task_name]
 
    for c_task in model.getTaskList():
-        assert(isinstance(c_task, COPASI.CCopasiTask))
+        assert (isinstance(c_task, COPASI.CCopasiTask))
        c_task.setScheduled(c_task.getObjectName() in task_name)
 
@@ -5248,7 +5248,7 @@ def get_parameter_sets(name=None, exact=False, values_only=False, **kwargs):
    result = []
 
    sets = model.getModelParameterSets()
-    assert(isinstance(sets, COPASI.ModelParameterSetVectorN))
+    assert (isinstance(sets, COPASI.ModelParameterSetVectorN))
 
    for i in range(sets.size()):
        pset = sets.get(i)
@@ -5289,7 +5289,7 @@ def remove_parameter_sets(name=None, exact=False, **kwargs):
    assert (isinstance(model, COPASI.CModel))
 
    sets = model.getModelParameterSets()
-    assert(isinstance(sets, COPASI.ModelParameterSetVectorN))
+    assert (isinstance(sets, COPASI.ModelParameterSetVectorN))
 
    num_sets = sets.size()
 
@@ -5804,7 +5804,7 @@ def _create_plot(plot_spec, data):
            count += 1
            ax.plot(data[cn_to_index[curve['channels'][0]]].values,
                    data[cn_to_index[curve['channels'][1]]].values,
-                    label=curve['name']) 
+                    label=curve['name'])
    if plot_spec['log_x']:
        ax.set_xscale('log')
    if plot_spec['log_y']:
diff --git a/basico/model_io.py b/basico/model_io.py
index 925e882..c969615 100644
--- a/basico/model_io.py
+++ b/basico/model_io.py
@@ -138,7 +138,7 @@ def get_num_loaded_models():
 
 def new_model(**kwargs):
    """Creates a new model and sets it as current.
-    
+
    :param kwargs: optional arguments
 
        - `name` (str): the name for the new model
@@ -277,7 +277,7 @@ def load_model(location, remove_user_defined_functions=False):
        except COPASI.CCopasiException:
            pass
    try:
-        if model.importSBML(location): 
+        if model.importSBML(location):
            return set_current_model(model)
    except COPASI.CCopasiException:
        pass
@@ -619,7 +619,7 @@ def save_model_and_data(filename, **kwargs):
            new_name = old_names.get(experiment.getFileNameOnly(), '')
            if not new_name:
                continue
-            
+
            # set relative path
            experiment.setFileName(os.path.relpath(new_name, data_dir))
 
diff --git a/basico/petab/select.py b/basico/petab/select.py
index 2577a43..a41a024 100644
--- a/basico/petab/select.py
+++ b/basico/petab/select.py
@@ -54,7 +54,7 @@ def default_evaluation():
    basico.run_parameter_estimation(method=basico.PE.GENETIC_ALGORITHM_SR, update_model=True,
                                    settings={'method': {
                                        'Number of Generations': 30,
-                                        'Population Size': 10, 
+                                        'Population Size': 10,
                                        'Stop after # Stalled Generations': 30
                                    }})
    logger.debug('running nl, 1000 iterations')
diff --git a/basico/task_mca.py b/basico/task_mca.py
index 311a15c..d20a921 100644
--- a/basico/task_mca.py
+++ b/basico/task_mca.py
@@ -58,9 +58,9 @@ def run_mca(**kwargs):
 def get_mca_matrix(matrix, scaled=True, run_first=False, **kwargs):
    """Returns the specified mca matrix as pandas data frame
 
-    :param matrix: the matrix to be returned, as string, one of `elasticities`, 
+    :param matrix: the matrix to be returned, as string, one of `elasticities`,
       `flux_control_coefficients` or `concentration_control_coefficients`
-    :param scaled: boolean flag indicating whether the scaled matrix should be returned 
+    :param scaled: boolean flag indicating whether the scaled matrix should be returned
       defaults to True
    :param run_first: boolean flag indicating that the task should be run first (defaults to false)
@@ -81,7 +81,7 @@ def get_mca_matrix(matrix, scaled=True, run_first=False, **kwargs):
    model = model_io.get_model_from_dict_or_default(kwargs)
    assert (isinstance(model, COPASI.CDataModel))
 
-    if run_first: 
+    if run_first:
        run_mca(**kwargs)
 
    task = model.getTask(basico.T.MCA)
@@ -93,12 +93,12 @@
        return basico.model_info._annotated_matrix_to_df(
            method.getScaledElasticitiesAnn() if scaled else method.getUnscaledElasticitiesAnn()
        )
-    
+
    if matrix == 'concentration_control_coefficients':
        return basico.model_info._annotated_matrix_to_df(
            method.getScaledConcentrationCCAnn() if scaled else method.getUnscaledConcentrationCCAnn()
        )
-    
+
    if matrix == 'flux_control_coefficients':
        return basico.model_info._annotated_matrix_to_df(
            method.getScaledFluxCCAnn() if scaled else method.getUnscaledFluxCCAnn()
        )
@@ -111,7 +111,7 @@ def get_elasticities(scaled=True, run_first=False, **kwargs):
    """Returns the elasticity matrix as pandas data frame
 
-    :param scaled: boolean flag indicating whether the scaled matrix should be returned 
+    :param scaled: boolean flag indicating whether the scaled matrix should be returned
       defaults to True
    :param run_first: boolean flag indicating that the task should be run first (defaults to false)
@@ -133,7 +133,7 @@ def get_flux_control_coefficients(scaled=True, run_first=False, **kwargs):
    """Returns the flux control coefficient matrix as pandas data frame
 
-    :param scaled: boolean flag indicating whether the scaled matrix should be returned 
+    :param scaled: boolean flag indicating whether the scaled matrix should be returned
       defaults to True
    :param run_first: boolean flag indicating that the task should be run first (defaults to false)
@@ -155,7 +155,7 @@ def get_concentration_control_coefficients(scaled=True, run_first=False, **kwargs):
    """Returns the concentration control coefficient matrix as pandas data frame
 
-    :param scaled: boolean flag indicating whether the scaled matrix should be returned 
+    :param scaled: boolean flag indicating whether the scaled matrix should be returned
       defaults to True
    :param run_first: boolean flag indicating that the task should be run first (defaults to false)
diff --git a/basico/task_optimization.py b/basico/task_optimization.py
index df4c3fb..9691e5d 100644
--- a/basico/task_optimization.py
+++ b/basico/task_optimization.py
@@ -120,7 +120,7 @@ def get_opt_constraints(model=None):
    pe_task = model.getTask(basico.T.OPTIMIZATION)
    problem = pe_task.getProblem()
    assert (isinstance(problem, COPASI.COptProblem))
-    
+
    data = []
 
    for i in range(problem.getOptConstraintSize()):
@@ -398,7 +398,7 @@ def get_opt_solution(model=None):
    assert (isinstance(problem, COPASI.COptProblem))
    solution = problem.getSolutionVariables()
    items = problem.getOptItemList()
-    assert(solution.size() == len(items))
+    assert (solution.size() == len(items))
 
    data = []
    for i in range(solution.size()):
@@ -516,7 +516,7 @@ def get_opt_statistic(**kwargs):
        'failed_constraint_evals': problem.geFailedConstraintCounter(),
        'cpu_time': problem.getExecutionTime(),
    }
-    if result['f_evals'] == 0: 
+    if result['f_evals'] == 0:
        result['evals_per_sec'] = 0
    else:
        result['evals_per_sec'] = result['cpu_time'] / result['f_evals']
diff --git a/basico/task_parameterestimation.py b/basico/task_parameterestimation.py
index 97f41a0..c727adb 100644
--- a/basico/task_parameterestimation.py
+++ b/basico/task_parameterestimation.py
@@ -398,7 +398,7 @@ def _get_experiment_file(experiment, **kwargs):
    raise_error = kwargs.get('raise_error', True)
    if raise_error:
        raise ValueError('Experiment file {0} does not exist'.format(file_name_only))
-    
+
    if return_relative and directory and os.path.exists(file_name_only):
        try:
            return os.path.relpath(file_name_only, directory)
@@ -926,7 +926,7 @@ def get_parameters_solution(model=None):
    assert (isinstance(problem, COPASI.CFitProblem))
    solution = problem.getSolutionVariables()
    items = problem.getOptItemList()
-    assert(solution.size() == len(items))
+    assert (solution.size() == len(items))
 
    data = []
    for i in range(solution.size()):
@@ -1049,7 +1049,7 @@ def add_experiment(name, data, **kwargs):
        if obj is None:
            logger.warning("Can't find model element for {0}".format(current))
        else:
-            assert(isinstance(obj, COPASI.CDataObject))
+            assert (isinstance(obj, COPASI.CDataObject))
            if obj.getObjectType() != 'Reference':
                try:
                    obj = obj.getValueReference()
@@ -1178,7 +1178,7 @@ def run_parameter_estimation(**kwargs):
        basico.set_task_settings(task, kwargs['settings'])
 
    num_messages_before = COPASI.CCopasiMessage.size()
-    
+
    task.setCallBack(get_default_handler())
    result = task.initializeRaw(COPASI.CCopasiTask.OUTPUT_UI)
    if not result:
@@ -1189,7 +1189,7 @@ def run_parameter_estimation(**kwargs):
    if not result:
        logger.error("Error while initializing parameter estimation: " +
                     basico.model_info.get_copasi_messages(num_messages_before))
-    
+
    task.restore()
 
    problem.setCreateParameterSets(old_create_parameter_sets)
@@ -1277,7 +1277,7 @@ def get_simulation_results(values_only=False, update_parameters=True, **kwargs):
            steady_state_task.initializeRaw(COPASI.CCopasiTask.OUTPUT_UI)
            steady_state_task.processRaw(True)
            data = basico.model_info._collect_data(cns=mapping[mapping.type == 'dependent']['cn'].to_list()).transpose()
-            
+
            for j in range(1, num_independent_points):
                container.applyInitialValues()
                container.updateSimulatedValues(False)
@@ -1893,7 +1893,7 @@ def load_experiments_from_yaml(experiment_description, **kwargs):
 
    :return:
    """
-    
+
    if os.path.exists(experiment_description):
        with open(experiment_description, 'r') as stream:
            experiments = yaml.safe_load(stream)
@@ -2009,7 +2009,7 @@ def add_experiment_from_dict(exp_dict, **kwargs):
        names = _get_nth_line_from_file(abs_data_file_name, int(exp_dict['header_row']), int(exp_dict['last_row']))
        if names is not None:
            names = names.split(exp_dict['separator'])
-        else: 
+        else:
            names = [i for i in range(columnNumber)]
 
    max_col = min(len(names), len(exp_dict['mapping']))
diff --git a/basico/task_scan.py b/basico/task_scan.py
index 5dcd5d2..2bc01b3 100644
--- a/basico/task_scan.py
+++ b/basico/task_scan.py
@@ -379,7 +379,7 @@ def add_scan_item(**kwargs):
            if not cn_group:
                cn_group = copasi_item.addGroup('ParameterSet CNs')
                assert (isinstance(cn_group, COPASI.CCopasiParameterGroup))
-            else: 
+            else:
                cn_group.clear()
 
            for index, cn in enumerate(scan_item['parameter_sets']):
diff --git a/basico/task_timecourse.py b/basico/task_timecourse.py
index cec2bc0..6ccb03d 100644
--- a/basico/task_timecourse.py
+++ b/basico/task_timecourse.py
@@ -60,7 +60,7 @@ def __build_result_from_ts(time_series, use_concentrations=True, use_sbml_id=Fal
    concentrations = numpy.empty([row_count, col_count])
    for i in range(row_count):
        for j in range(col_count):
-            if use_concentrations: 
+            if use_concentrations:
                concentrations[i, j] = time_series.getConcentrationData(i, j)
            else:
                concentrations[i, j] = time_series.getData(i, j)
@@ -341,7 +341,7 @@ def run_time_course(*args, **kwargs):
        use_concentrations = False
 
    use_sbml_id = kwargs.get('use_sbml_id', False)
-    
+
    return __build_result_from_ts(task.getTimeSeries(), use_concentrations, use_sbml_id, model)
 
diff --git a/examples/Accessing Models from Databases.py b/examples/Accessing Models from Databases.py
index b174cdc..209fa0e 100644
--- a/examples/Accessing Models from Databases.py
+++ b/examples/Accessing Models from Databases.py
@@ -9,7 +9,7 @@
 import sys
 if '..' not in sys.path:
    sys.path.append('..')
-    
+
 import basico.biomodels as biomodels
 
 
@@ -19,7 +19,7 @@
 glycolysis_models = biomodels.search_for_model('glycolysis')
 
 
-for model in glycolysis_models: 
+for model in glycolysis_models:
    print ('Id: %s' % model['id'])
    print ('Name: %s' % model['name'])
    print ('Format: %s' % model['format'])
@@ -55,7 +55,7 @@
 print ("Main FileName is: '{0}' and has size {1} kb".format(first_entry['name'], first_entry['fileSize']))
 
 
-# to actually get hold of the model itself, you can use the `get_content_for_model` function, that takes a model id, as well as an optional filename. If the filename is not given, the first main content will be chosen automatically. So to download the model of biomodel #206, once could simply call: 
+# to actually get hold of the model itself, you can use the `get_content_for_model` function, that takes a model id, as well as an optional filename. If the filename is not given, the first main content will be chosen automatically. So to download the model of biomodel #206, once could simply call:
 
 # In[7]:
 
@@ -69,7 +69,7 @@
 print(sbml[:1000])  # just printing the first couple of lines
 
 
-# of course you can simply call `load_biomodel(206)` to load a biomodel into basico. 
+# of course you can simply call `load_biomodel(206)` to load a biomodel into basico.
 
 # ## JWS Online
 # we also provide access to models from JWS online.
@@ -86,7 +86,7 @@
 atp_models = jws.get_models_for_species('atp')
 
 
-for model in atp_models: 
+for model in atp_models:
    print(model['slug'])
 
diff --git a/examples/Getting and setting reaction parameters,reactions and metabolites with Basico.py b/examples/Getting and setting reaction parameters,reactions and metabolites with Basico.py
index 10f2f23..da06623 100644
--- a/examples/Getting and setting reaction parameters,reactions and metabolites with Basico.py
+++ b/examples/Getting and setting reaction parameters,reactions and metabolites with Basico.py
@@ -70,19 +70,19 @@
 
 #     if 'name' in kwargs:
 #         metab.setObjectName(kwargs['name'])
-# 
+#
 #     if 'unit' in kwargs:
 #         metab.setUnitExpression(kwargs['unit'])
-# 
+#
 #     if 'initial_concentration' in kwargs:
 #         metab.setInitialConcentration(kwargs['initial_concentration']),
-# 
+#
 #     if 'initial_particle_number' in kwargs:
 #         metab.setInitialValue(kwargs['initial_particle_number']),
-# 
+#
 #     if 'initial_expression' in kwargs:
 #         metab.setInitialExpression(kwargs['initial_expression'])
-# 
+#
 #     if 'expression' in kwargs:
 #         metab.setExpression(kwargs['expression'])
diff --git a/examples/Simple simulations with basico.py b/examples/Simple simulations with basico.py
index ecdb133..5569f8f 100644
--- a/examples/Simple simulations with basico.py
+++ b/examples/Simple simulations with basico.py
@@ -19,8 +19,8 @@
 from basico import *
 
 
-# now we are ready to load a model, just adjust the file_name variable, to match yours. 
-# The file can be a COPASI or SBML file. For this example, the filename lies in the folder where i started the notebook from. 
+# now we are ready to load a model, just adjust the file_name variable, to match yours.
+# The file can be a COPASI or SBML file. For this example, the filename lies in the folder where i started the notebook from.
 
 # In[3]:
 
@@ -34,7 +34,7 @@
 model = load_model(file_name)
 
 
-# now we are ready to simulate. Calling `run_time_course` will run the simulation as specified in the COPASI file and return a pandas dataframe for it. 
+# now we are ready to simulate. Calling `run_time_course` will run the simulation as specified in the COPASI file and return a pandas dataframe for it.
 
 # In[5]:
 
@@ -52,8 +52,8 @@
 
 # ## The run_time_course command
 
-# you can change different options for the time course by adding named parameters into the `run_time_course_command`. Supported are: 
-# 
+# you can change different options for the time course by adding named parameters into the `run_time_course_command`. Supported are:
+#
 # * `model`: incase you want to use another model than the one last loaded
 # * `scheduled`: to mark the model as scheduled
 # * `update_model`: to update the initial state after the simulation is run
@@ -62,7 +62,7 @@
 # * `output_event`: in case you would like to have the event values before and after the event hit listed
 # * `start_time`: to change the start time
 # * `step_number` or `interals`: to overwrite the number of steps being used
-# 
+#
 # so lets run two simulations that will be different slightly, as we will use the `update_model` flag:
 
 
@@ -79,7 +79,7 @@
 df1.plot(), df2.plot()
 
 
-# And now you could plot the difference between them too: 
+# And now you could plot the difference between them too:
 
 # In[9]:
 
diff --git a/examples/parallel_scan.py b/examples/parallel_scan.py
index be25369..f2286db 100644
--- a/examples/parallel_scan.py
+++ b/examples/parallel_scan.py
@@ -16,12 +16,12 @@
 def worker(args):
    """This is the worker function
 
-    :param args: the tuple with the arguments for the worker, that is expected 
-        to consist of the seed to be used, as well as the model 
-        string, that should be loaded in case no model is loaded yet. 
+    :param args: the tuple with the arguments for the worker, that is expected
+        to consist of the seed to be used, as well as the model
+        string, that should be loaded in case no model is loaded yet.
    :type args: (int, str)
 
-    :return: tuple of: (initial concentration of cysteine, and adomed used, 
+    :return: tuple of: (initial concentration of cysteine, and adomed used,
        flux of CGS, and TS)
    :rtype: (float, float, float, float)
    """
@@ -30,12 +30,12 @@ def worker(args):
    # we set the seed for reproducibility purposes only
    # you probably wouldnt do that normally
    random.seed(seed)
-    
-    
+
+
    # check if a model is already loaded into the worker
    # since the workers are being reused, and we are just sampling
-    # here, we dont want to reload the model multiple times. 
-    
+    # here, we dont want to reload the model multiple times.
+
    if get_num_loaded_models() == 0:
        # no model is loaded, so we load the model string
        m = load_model_from_string(model_string)
@@ -45,9 +45,9 @@ def worker(args):
 
    # we sample the model as described in Mendes (2009)
    cysteine = 0.3 * 10 ** random.uniform(0, 3)
-    adomed = random.uniform(0, 100) 
-    
-    # set the sampled initial concentration. 
+    adomed = random.uniform(0, 100)
+
+    # set the sampled initial concentration.
    set_species('Cysteine', initial_concentration=cysteine, model=m)
    set_species('S-adenosylmethionine', initial_concentration=adomed, model=m)
@@ -100,7 +100,7 @@ def plot_result(result):
    # download biomodel #68
    bm = load_biomodel(68);
 
-    # save it to string (that way we can easily pass it to any worker, no matter 
+    # save it to string (that way we can easily pass it to any worker, no matter
    # whether the pool is local or MPI is used)
    cps_model_string = save_model_to_string()
 
@@ -109,12 +109,12 @@ def plot_result(result):
    # measure computation time
    start = timer()
-    
+
    # compute results
    result = run_in_parallel(cps_model_string)
 
    # print how long the computation took
    print('calculation took {0}'.format(timer() - start))
-    
+
    # plot results
    plot_result(result)
 
diff --git a/tests/test_basico_info.py b/tests/test_basico_info.py
index fce1419..0aec524 100644
--- a/tests/test_basico_info.py
+++ b/tests/test_basico_info.py
@@ -99,9 +99,9 @@ def test_notes(self):
        notes = basico.model_info.get_notes()
        self.assertTrue('The famous Brusselator' in notes)
        basico.model_info.set_notes("""
-        
+
        New Multiline Comment
-        
+
        """)
        new_notes = basico.model_info.get_notes()
        self.assertTrue('Multiline' in new_notes)
diff --git a/tests/test_mca.py b/tests/test_mca.py
index 7d57fb6..32dcc21 100644
--- a/tests/test_mca.py
+++ b/tests/test_mca.py
@@ -13,7 +13,7 @@ def test_settings(self):
        self.assertTrue(settings['method']['Use Reder'])
        self.assertTrue(settings['method']['Use Smallbone'])
-    
+
    def test_run(self):
        basico.run_mca()
        elasticities = basico.get_elasticities()
diff --git a/tests/test_optimization.py b/tests/test_optimization.py
index ff65772..507b0bd 100644
--- a/tests/test_optimization.py
+++ b/tests/test_optimization.py
@@ -12,7 +12,7 @@ def setUp(self):
        basico.add_parameter('y', initial_value=0)
        basico.add_parameter('f', type='assignment',
                             expression='({Values[x].InitialValue}^2+{Values[y].InitialValue}-11)^2+({Values[x].InitialValue}+{Values[y].InitialValue}^2-7)^2')
-    
+
    def test_optitems(self):
        self.assertIsNone(basico.get_opt_parameters())
        template = basico.get_opt_item_template(include_global=True)
diff --git a/tests/test_petab.py b/tests/test_petab.py
index 446cf41..fc5b035 100644
--- a/tests/test_petab.py
+++ b/tests/test_petab.py
@@ -61,7 +61,7 @@ def test_dataframe_convert(self):
        result_df = mes_df.copy(True)
        result_df = result_df.rename(columns={"measurement": "simulation"})
        basico.petab.core._update_df_from_simulation(result_df, sim_df, 'model1_data1')
-        result_df.to_csv('out_df.csv', index=False) 
+        result_df.to_csv('out_df.csv', index=False)
        os.remove('out_df.csv')
        self.assertEqual(mes_df.shape[0], result_df.shape[0])
        self.assertEqual(mes_df.shape[1], result_df.shape[1])
diff --git a/tests/test_profile.py b/tests/test_profile.py
index 0153d4b..413095f 100644
--- a/tests/test_profile.py
+++ b/tests/test_profile.py
@@ -70,7 +70,7 @@ def test_creation_and_run(self):
        basico.new_model(name='Schaber Example', notes="""
        The example from the supplement, originally described in
 
-        Schaber, J. and Klipp, E. (2011) Model-based inference of biochemical parameters and 
+        Schaber, J. and Klipp, E. (2011) Model-based inference of biochemical parameters and
        dynamic properties of microbial signal transduction networks, Curr Opin Biotechnol, 22, 109-
        116 (https://doi.org/10.1016/j.copbio.2010.09.014)