diff --git a/t3/main.py b/t3/main.py index c8a52b38..45ad30c5 100755 --- a/t3/main.py +++ b/t3/main.py @@ -158,7 +158,7 @@ def __init__(self, clean_dir: bool = False, ): - self.sa_dict = None + self.sa_dict, self.sa_dict_idt = None, None self.sa_observables = list() self.t0 = datetime.datetime.now() # initialize the timer as datetime object @@ -313,9 +313,12 @@ def execute(self): sa_rtol=self.t3['sensitivity']['rtol'], ) simulate_adapter.simulate() - self.sa_dict = simulate_adapter.get_sa_coefficients() - save_yaml_file(path=self.paths['SA dict'], content=self.sa_dict) - if self.t3['sensitivity']['global_observables'] == 'IDT': + self.sa_dict = simulate_adapter.get_sa_coefficients( + top_SA_species=self.t3['sensitivity']['top_SA_species'], + top_SA_reactions=self.t3['sensitivity']['top_SA_reactions'], + max_workers=self.t3['sensitivity']['max_workers'], + save_yaml=True) + if self.t3['sensitivity']['global_observables'].lower() == 'idt': simulate_adapter = simulate_factory(simulate_method='CanteraIDT', t3=self.t3, rmg=self.rmg, @@ -327,8 +330,11 @@ def execute(self): sa_rtol=self.t3['sensitivity']['rtol'], ) simulate_adapter.simulate() - self.sa_dict_idt = simulate_adapter.get_sa_coefficients() - save_yaml_file(path=self.paths['SA idt dict'], content=self.sa_dict_idt) + self.sa_dict_idt = simulate_adapter.get_sa_coefficients( + top_SA_species=self.t3['sensitivity']['top_SA_species'], + top_SA_reactions=self.t3['sensitivity']['top_SA_reactions'], + max_workers=self.t3['sensitivity']['max_workers'], + save_yaml=True) additional_calcs_required = self.determine_species_and_reactions_to_calculate() @@ -395,6 +401,7 @@ def set_paths(self, 'SA input': os.path.join(iteration_path, 'SA', 'input.py'), 'SA dict': os.path.join(iteration_path, 'SA', 'sa.yaml'), 'SA IDT dict': os.path.join(iteration_path, 'SA', 'sa_idt.yaml'), + 'SA IDT dict top X': os.path.join(iteration_path, 'SA', 'sa_idt_top_x.yaml'), 'PDep SA': os.path.join(iteration_path, 'PDep_SA'), 'ARC': os.path.join(iteration_path, 'ARC'), 'ARC input': os.path.join(iteration_path, 'ARC', 'input.yml'), @@ -691,6 +698,7 @@ def determine_species_and_reactions_to_calculate(self) -> bool: bool: Whether additional calculations are required. """ species_keys, reaction_keys, coll_vio_spc_keys, coll_vio_rxn_keys = list(), list(), list(), list() + rxn_idt_keys = None self.rmg_species, self.rmg_reactions = self.load_species_and_reactions_from_chemkin_file() self.logger.info(f'This RMG model has {len(self.rmg_species)} species ' @@ -726,6 +734,9 @@ def determine_species_and_reactions_to_calculate(self) -> bool: # 1.2. SA if sa_observables_exist: species_keys.extend(self.determine_species_based_on_sa()) + if self.t3['sensitivity']['global_observables'].lower() == 'idt': + species_idt_keys, rxn_idt_keys = self.determine_params_based_on_sa_idt() + species_keys.extend(species_idt_keys) # 1.3. collision violators if self.t3['options']['collision_violators_thermo']: species_keys.extend(coll_vio_spc_keys) @@ -734,6 +745,10 @@ def determine_species_and_reactions_to_calculate(self) -> bool: # 2.1. SA if sa_observables_exist: reaction_keys.extend(self.determine_reactions_based_on_sa()) + if self.t3['sensitivity']['global_observables'].lower() == 'idt': + if rxn_idt_keys is None: + species_idt_keys, rxn_idt_keys = self.determine_params_based_on_sa_idt() + reaction_keys.extend(rxn_idt_keys) # 2.2. 
collision violators
         if self.t3['options']['collision_violators_rates']:
             reaction_keys.extend(coll_vio_rxn_keys)
 
@@ -812,6 +827,58 @@ def determine_species_based_on_sa(self) -> List[int]:
 
         return species_keys
 
+    def determine_params_based_on_sa_idt(self) -> Tuple[List[int], List[int]]:
+        """
+        Determine species and reactions to calculate based on the sensitivity analysis of IDT.
+
+        Returns:
+            Tuple[List[int], List[int]]: T3 indices of species and reactions determined to be calculated based on the IDT SA.
+        """
+        visited_species, species_keys = list(), list()
+        visited_rxns, rxn_keys = list(), list()
+        if self.sa_dict_idt is None:
+            self.logger.error(f"T3's sa_dict_idt was None. Please check that the input file contains a proper "
+                              f"'sensitivity' block, that 'IDT' was defined in the global_observables list, "
+                              f"and/or that SA was run successfully.\n"
+                              f"Not performing refinement based on IDT sensitivity analysis!")
+            return species_keys, rxn_keys
+        for token in ['thermo', 'kinetics']:
+            for r, reactor_idt_data in self.sa_dict_idt[token]['IDT'].items():
+                for phi, phi_data in reactor_idt_data.items():
+                    for p, p_data in phi_data.items():
+                        for t, idt_data in p_data.items():
+                            for index, sa in idt_data.items():
+                                if token == 'thermo':
+                                    if index not in visited_species:
+                                        visited_species.append(index)
+                                        species = self.get_species_by_index(index)
+                                        if self.species_requires_refinement(species=species):
+                                            reason = f'(i {self.iteration}) IDT is sensitive to this species.'
+                                            key = self.add_species(species=species, reasons=reason)
+                                            if key is not None:
+                                                species_keys.append(key)
+                                elif token == 'kinetics':
+                                    if index not in visited_rxns:
+                                        visited_rxns.append(index)
+                                        reaction = self.get_reaction_by_index(index)
+                                        if self.reaction_requires_refinement(reaction):
+                                            reason = f'(i {self.iteration}) IDT is sensitive to this reaction.'
+                                            key = self.add_reaction(reaction=reaction, reasons=reason)
+                                            if key is not None:
+                                                rxn_keys.append(key)
+                                        for spc in reaction.reactants + reaction.products:
+                                            spc_index = self.get_species_key(spc)
+                                            if spc_index not in visited_species:
+                                                visited_species.append(spc_index)
+                                                if self.species_requires_refinement(species=spc):
+                                                    reason = f'(i {self.iteration}) IDT is sensitive to a reaction ' \
+                                                             f'({reaction}) in which this species participates.'
+                                                    key = self.add_species(species=spc, reasons=reason)
+                                                    if key is not None:
+                                                        species_keys.append(key)
+        return species_keys, rxn_keys
+
+
     def determine_reactions_based_on_sa(self) -> List[int]:
         """
         Determine reaction rate coefficients to calculate based on sensitivity analysis.
@@ -1178,6 +1245,21 @@ def get_species_key(self,
                 return key
         return None
 
+    def get_species_by_index(self, index: int) -> Optional[Species]:
+        """
+        Get a species object by its T3 index.
+
+        Args:
+            index (int): The species index.
+
+        Returns:
+            Optional[Species]: The species object if it exists, ``None`` if it does not.
+        """
+        for key, species_dict in self.species.items():
+            if key == index:
+                return species_dict['object']
+        return None
+
     def get_reaction_key(self,
                          reaction: Optional[Reaction] = None,
                          label: Optional[str] = None,
@@ -1206,6 +1288,21 @@ def get_reaction_key(self,
                 return key
         return None
 
+    def get_reaction_by_index(self, index: int) -> Optional[Reaction]:
+        """
+        Get a reaction object by its T3 index.
+
+        Args:
+            index (int): The reaction index.
+
+        Returns:
+            Optional[Reaction]: The reaction object if it exists, ``None`` if it does not.
+ """ + for key, reaction_dict in self.reactions.items(): + if key == index: + return reaction_dict['object'] + return None + def load_species_and_reactions_from_chemkin_file(self) -> Tuple[List[Species], List[Reaction]]: """ Load RMG Species and Reaction objects from the annotated Chemkin file. @@ -1251,7 +1348,7 @@ def add_species(self, ) -> Optional[int]: """ Add a species to self.species and to self.qm['species']. - If the species already exists in self.species, only the reasons to compute will be appended. + If the species already exists in self.species, only the reasons to compute it will be appended. Args: species (Species): The species to consider. diff --git a/t3/simulate/adapter.py b/t3/simulate/adapter.py index fd0949e9..d91ef614 100755 --- a/t3/simulate/adapter.py +++ b/t3/simulate/adapter.py @@ -5,6 +5,7 @@ """ from abc import ABC, abstractmethod +from typing import Optional class SimulateAdapter(ABC): @@ -27,7 +28,12 @@ def simulate(self): pass @abstractmethod - def get_sa_coefficients(self): + def get_sa_coefficients(self, + top_SA_species: int = 10, + top_SA_reactions: int = 10, + max_workers: int = 24, + save_yaml: bool = True, + ) -> Optional[dict]: """ Obtain the sensitivity analysis coefficients. diff --git a/t3/simulate/cantera_constantHP.py b/t3/simulate/cantera_constantHP.py index 836ae024..41e2d377 100644 --- a/t3/simulate/cantera_constantHP.py +++ b/t3/simulate/cantera_constantHP.py @@ -382,12 +382,23 @@ def simulate(self): self.all_data.append((time, condition_data, reaction_sensitivity_data, thermodynamic_sensitivity_data)) - def get_sa_coefficients(self): + def get_sa_coefficients(self, + top_SA_species: int = 10, + top_SA_reactions: int = 10, + max_workers: int = 24, + save_yaml: bool = True, + ) -> Optional[dict]: """ Obtain the SA coefficients. + Args: + top_SA_species (int, optional): The number of top sensitive species to return. + top_SA_reactions (int, optional): The number of top sensitive reactions to return. + max_workers (int, optional): The maximal number of workers to use for parallel processing. + save_yaml (bool, optional): Save the SA dictionary to a YAML file. + Returns: - sa_dict (dict): a SA dictionary, whose structure is given in the docstring for T3/t3/main.py + sa_dict (Optional[dict]): a SA dictionary, whose structure is given in the docstring for T3/t3/main.py """ sa_dict = {'kinetics': dict(), 'thermo': dict(), 'time': list()} @@ -411,7 +422,7 @@ def get_sa_coefficients(self): sa_dict['thermo'][observable_label] = dict() parameter = get_parameter_from_header(spc) sa_dict['thermo'][observable_label][parameter] = spc.data - + # todo: add save yaml to all adapters, use top SA species and reactions, change usage in main return sa_dict def get_idt_by_T(self): diff --git a/t3/simulate/cantera_constantTP.py b/t3/simulate/cantera_constantTP.py index 48a186a0..a19f5875 100644 --- a/t3/simulate/cantera_constantTP.py +++ b/t3/simulate/cantera_constantTP.py @@ -381,12 +381,23 @@ def simulate(self): self.all_data.append((time, condition_data, reaction_sensitivity_data, thermodynamic_sensitivity_data)) - def get_sa_coefficients(self): + def get_sa_coefficients(self, + top_SA_species: int = 10, + top_SA_reactions: int = 10, + max_workers: int = 24, + save_yaml: bool = True, + ) -> Optional[dict]: """ Obtain the SA coefficients. + Args: + top_SA_species (int, optional): The number of top sensitive species to return. + top_SA_reactions (int, optional): The number of top sensitive reactions to return. 
+ max_workers (int, optional): The maximal number of workers to use for parallel processing. + save_yaml (bool, optional): Save the SA dictionary to a YAML file. + Returns: - sa_dict (dict): a SA dictionary, whose structure is given in the docstring for T3/t3/main.py + sa_dict (Optional[dict]): a SA dictionary, whose structure is given in the docstring for T3/t3/main.py """ sa_dict = {'kinetics': dict(), 'thermo': dict(), 'time': list()} diff --git a/t3/simulate/cantera_constantUV.py b/t3/simulate/cantera_constantUV.py index bffbed7a..a71dd7ff 100644 --- a/t3/simulate/cantera_constantUV.py +++ b/t3/simulate/cantera_constantUV.py @@ -381,12 +381,23 @@ def simulate(self): self.all_data.append((time, condition_data, reaction_sensitivity_data, thermodynamic_sensitivity_data)) - def get_sa_coefficients(self): + def get_sa_coefficients(self, + top_SA_species: int = 10, + top_SA_reactions: int = 10, + max_workers: int = 24, + save_yaml: bool = True, + ) -> Optional[dict]: """ Obtain the SA coefficients. + Args: + top_SA_species (int, optional): The number of top sensitive species to return. + top_SA_reactions (int, optional): The number of top sensitive reactions to return. + max_workers (int, optional): The maximal number of workers to use for parallel processing. + save_yaml (bool, optional): Save the SA dictionary to a YAML file. + Returns: - sa_dict (dict): a SA dictionary, whose structure is given in the docstring for T3/t3/main.py + sa_dict (Optional[dict]): a SA dictionary, whose structure is given in the docstring for T3/t3/main.py """ sa_dict = {'kinetics': dict(), 'thermo': dict(), 'time': list()} diff --git a/t3/simulate/rmg_constantTP.py b/t3/simulate/rmg_constantTP.py index 8c300234..3287c9a8 100755 --- a/t3/simulate/rmg_constantTP.py +++ b/t3/simulate/rmg_constantTP.py @@ -18,6 +18,8 @@ from rmgpy.tools.loader import load_rmg_py_job from rmgpy.tools.plot import plot_sensitivity +from arc.common import read_yaml_file, save_yaml_file + from t3.common import get_chem_to_rmg_rxn_index_map, get_species_by_label, get_values_within_range, \ get_observable_label_from_header, get_parameter_from_header, time_lapse from t3.simulate.adapter import SimulateAdapter @@ -224,10 +226,21 @@ def simulate(self): self.logger.info(f'Simulation via RMG completed, execution time: {time_lapse(tic)}') - def get_sa_coefficients(self) -> Optional[dict]: + def get_sa_coefficients(self, + top_SA_species: int = 10, + top_SA_reactions: int = 10, + max_workers: int = 24, + save_yaml: bool = True, + ) -> Optional[dict]: """ Obtain the SA coefficients. + Args: + top_SA_species (int, optional): The number of top sensitive species to return. + top_SA_reactions (int, optional): The number of top sensitive reactions to return. + max_workers (int, optional): The maximal number of workers to use for parallel processing. + save_yaml (bool, optional): Save the SA dictionary to a YAML file. 
+ Returns: sa_dict (Optional[dict]): An SA dictionary, structure is given in the docstring for T3/t3/main.py """ @@ -265,6 +278,8 @@ def get_sa_coefficients(self) -> Optional[dict]: parameter = chem_to_rmg_rxn_index_map[int(parameter)] \ if all(c.isdigit() for c in parameter) else parameter sa_dict[sa_type][observable_label][parameter] = df[header].values + if save_yaml: + save_yaml_file(path=self.paths['SA dict'], content=sa_dict) return sa_dict def generate_rmg_reactors_for_simulation(self) -> dict: diff --git a/t3/simulate/rxn_idt.py b/t3/simulate/rxn_idt.py new file mode 100644 index 00000000..b3b9ab43 --- /dev/null +++ b/t3/simulate/rxn_idt.py @@ -0,0 +1,324 @@ +import os +import sys +import time +import cantera as ct +import numpy as np +import pandas as pd +import concurrent.futures +import rmgpy.chemkin +import subprocess + + +# get the table index from input for easy parallelization + +chemkin = sys.argv[1] +aramco = False +experimental_table_index = int(sys.argv[2]) + + +rxn_index_start = int(sys.argv[3]) + +working_dir = os.path.join(os.path.dirname(chemkin)) + + +# perturb every species and reaction in the mechanism +# we'll select the perturbations one at a time later in the script +def perturb_species(species, delta): + # takes in an RMG species object + # change the enthalpy offset + increase = None + for poly in species.thermo.polynomials: + new_coeffs = poly.coeffs + if not increase: + # Only define the increase in enthalpy once or you'll end up with numerical gaps in continuity + increase = delta * new_coeffs[5] + new_coeffs[5] += increase + poly.coeffs = new_coeffs + + +def perturb_reaction(rxn, delta): # 0.1 is default + # takes in an RMG reaction object + # delta is the ln(k) amount to perturb the A factor + # delta is a multiplicative factor- units don't matter, yay! 
+ # does not deepycopy because there's some issues with rmgpy.reactions copying + if type(rxn.kinetics) == rmgpy.kinetics.chebyshev.Chebyshev: + rxn.kinetics.coeffs.value_si[0][0] += np.log10(1.0 + delta) + elif type(rxn.kinetics) in [rmgpy.kinetics.falloff.Troe, rmgpy.kinetics.falloff.ThirdBody, rmgpy.kinetics.falloff.Lindemann]: + if hasattr(rxn.kinetics, 'arrheniusHigh'): + rxn.kinetics.arrheniusHigh.A.value *= np.exp(delta) + if hasattr(rxn.kinetics, 'arrheniusLow'): + rxn.kinetics.arrheniusLow.A.value *= np.exp(delta) + elif type(rxn.kinetics) == rmgpy.kinetics.arrhenius.MultiArrhenius: + for j in range(len(rxn.kinetics.arrhenius)): + rxn.kinetics.arrhenius[j].A.value *= np.exp(delta) + elif type(rxn.kinetics) == rmgpy.kinetics.arrhenius.PDepArrhenius: + for j in range(len(rxn.kinetics.arrhenius)): + if type(rxn.kinetics.arrhenius[j]) == rmgpy.kinetics.arrhenius.Arrhenius: + rxn.kinetics.arrhenius[j].A.value *= np.exp(delta) + else: + raise ValueError(f'weird kinetics {str(rxn.kinetics)}') + elif type(rxn.kinetics) == rmgpy.kinetics.arrhenius.MultiPDepArrhenius: + for i in range(len(rxn.kinetics.arrhenius)): + for j in range(len(rxn.kinetics.arrhenius[i].arrhenius)): + if type(rxn.kinetics.arrhenius[i].arrhenius[j]) == rmgpy.kinetics.arrhenius.Arrhenius: + rxn.kinetics.arrhenius[i].arrhenius[j].A.value *= np.exp(delta) + elif type(rxn.kinetics.arrhenius[i].arrhenius[j]) == rmgpy.kinetics.arrhenius.MultiArrhenius: + for k in range(len(rxn.kinetics.arrhenius[i].arrhenius[j].arrhenius)): + rxn.kinetics.arrhenius[i].arrhenius[j].arrhenius[k].A.value *= np.exp(delta) + else: + raise ValueError(f'weird kinetics {str(rxn.kinetics)}') + + else: # Arrhenius + rxn.kinetics.A.value *= np.exp(delta) + + +transport = os.path.join(working_dir, 'tran.dat') +species_dict = os.path.join(working_dir, 'species_dictionary.txt') +species_list, reaction_list = rmgpy.chemkin.load_chemkin_file(chemkin, dictionary_path=species_dict, transport_path=transport, use_chemkin_names=True) +print(f'Loaded {len(species_list)} species, {len(reaction_list)} reactions') +base_yaml_path = os.path.join(working_dir, 'base.yaml') +perturbed_chemkin = os.path.join(working_dir, 'perturbed.inp') +perturbed_yaml_path = os.path.join(working_dir, 'perturbed.yaml') + +skip_create_perturb = False +if os.path.exists(perturbed_yaml_path): + skip_create_perturb = True + print('Perturbed yaml already exists, skipping creation of perturbed mechanism') + +if not skip_create_perturb: + if experimental_table_index == 7: # only do this once, instead of once for each of the 12 tables + # load the chemkin file and create a normal and perturbed cti for simulations: + # # write base cantera + subprocess.run(['ck2yaml', f'--input={chemkin}', f'--transport={transport}', f'--output={base_yaml_path}']) + + delta = 0.1 + for i in range(0, len(species_list)): + perturb_species(species_list[i], delta) + + for i in range(0, len(reaction_list)): + try: + perturb_reaction(reaction_list[i], delta) + except AttributeError: + continue + + # save the results + rmgpy.chemkin.save_chemkin_file(perturbed_chemkin, species_list, reaction_list, verbose=True, check_for_duplicates=True) + subprocess.run(['ck2yaml', f'--input={perturbed_chemkin}', f'--transport={transport}', f'--output={perturbed_yaml_path}']) + else: + while not os.path.exists(perturbed_yaml_path): + time.sleep(10) + + +# load the 2 ctis +base_gas = ct.Solution(base_yaml_path) +perturbed_gas = ct.Solution(perturbed_yaml_path) + + +# Take Reactor Conditions from Table 7 of supplementary info in +# 
https://doi-org.ezproxy.neu.edu/10.1016/j.combustflame.2010.01.016 +def run_simulation(T_orig, P_orig, X_orig): + # function to run a RCM simulation + + atols = [1e-15, 1e-15, 1e-18] + rtols = [1e-9, 1e-12, 1e-15] + for attempt_index in range(0, len(atols)): + T = T_orig + P = P_orig + X = X_orig + + # gas is a global object + t_end = 1.0 # time in seconds + base_gas.TPX = T, P, X + + reactor = ct.IdealGasReactor(base_gas) + reactor_net = ct.ReactorNet([reactor]) + reactor_net.atol = atols[attempt_index] + reactor_net.rtol = rtols[attempt_index] + + times = [0] + T = [reactor.T] + P = [reactor.thermo.P] + X = [reactor.thermo.X] # mol fractions + MAX_STEPS = 10000 + step_count = 0 + failed = False + while reactor_net.time < t_end: + try: + reactor_net.step() + except ct._cantera.CanteraError: + print(f'Reactor failed to solve! {attempt_index}') + failed = True + break + # print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!') + # return 0 + + times.append(reactor_net.time) + T.append(reactor.T) + P.append(reactor.thermo.P) + X.append(reactor.thermo.X) + + step_count += 1 + if step_count > MAX_STEPS: + print(f'Too many steps! Reactor failed to solve! {attempt_index}') + # print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!') + failed = True + break + # return 0 + + if not failed: + slopes = np.gradient(P, times) + delay_i = np.argmax(slopes) + return times[delay_i] + print(f'trying again {attempt_index}') + + print('Reactor failed to solve after many attempts!') + print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!') + return 0 + + +# Load the experimental conditions +ignition_delay_data = os.path.join(os.environ['AUTOSCIENCE_REPO'], 'experiment', 'butane_ignition_delay.csv') +df_exp = pd.read_csv(ignition_delay_data) +table_exp = df_exp[df_exp['Table'] == experimental_table_index] +# Define Initial conditions using experimental data +tau_exp = table_exp['time (ms)'].values.astype(float) # ignition delay +T7 = table_exp['T_C'].values # Temperatures +P7 = table_exp['nominal pressure(atm)'].values * ct.one_atm # pressures in atm +phi7 = table_exp['phi'].values # equivalence ratios +# list of starting conditions +# Mixture compositions taken from table 2 of +# https://doi-org.ezproxy.neu.edu/10.1016/j.combustflame.2010.01.016 +concentrations = [] +if not aramco: + if phi7[0] == 0.3: + x_diluent = 0.7821 + conc_dict = { + 'O2(2)': 0.2083, + 'butane(1)': 0.00962 + } + elif phi7[0] == 0.5: + x_diluent = 0.7771 + conc_dict = { + 'O2(2)': 0.2070, + 'butane(1)': 0.01595 + } + elif phi7[0] == 1.0: + x_diluent = 0.7649 + conc_dict = { + 'O2(2)': 0.2038, + 'butane(1)': 0.03135 + } + elif phi7[0] == 2.0: + x_diluent = 0.7416 + conc_dict = { + 'O2(2)': 0.1976, + 'butane(1)': 0.06079 + } + else: + raise ValueError + for i in range(0, len(table_exp)): + x_N2 = table_exp['%N2'].values[i] / 100.0 * x_diluent + x_Ar = table_exp['%Ar'].values[i] / 100.0 * x_diluent + x_CO2 = table_exp['%CO2'].values[i] / 100.0 * x_diluent + conc_dict['N2'] = x_N2 + conc_dict['Ar'] = x_Ar + conc_dict['CO2(7)'] = x_CO2 + concentrations.append(conc_dict) +else: + if phi7[0] == 0.3: + x_diluent = 0.7821 + conc_dict = { + 'O2': 0.2083, + 'C4H10': 0.00962 + } + elif phi7[0] == 0.5: + x_diluent = 0.7771 + conc_dict = { + 'O2': 0.2070, + 'C4H10': 0.01595 + } + elif phi7[0] == 1.0: + x_diluent = 0.7649 + conc_dict = { + 'O2': 0.2038, + 'C4H10': 0.03135 + } + elif phi7[0] == 2.0: + x_diluent = 0.7416 + conc_dict = { + 'O2': 0.1976, + 'C4H10': 0.06079 + } + else: + raise ValueError + for i in range(0, len(table_exp)): + x_N2 = 
table_exp['%N2'].values[i] / 100.0 * x_diluent + x_Ar = table_exp['%Ar'].values[i] / 100.0 * x_diluent + x_CO2 = table_exp['%CO2'].values[i] / 100.0 * x_diluent + conc_dict['N2'] = x_N2 + conc_dict['AR'] = x_Ar + conc_dict['CO2'] = x_CO2 + concentrations.append(conc_dict) + +# just use the first concentration +Tmax = 1077 # use min and max temperature range of the data: 663K-1077K +Tmin = 663 +N = 51 +temperatures = np.linspace(Tmin, Tmax, N) + + +def same_reaction(rxn1, rxn2): + """Returns true IFF reactions have same reactants, products, and type""" + if rxn1.reactants == rxn2.reactants and rxn1.products == rxn2.products and type(rxn1) == type(rxn2): + return True + else: + return False + + +# compute and save the delays +species_delays = np.zeros((len(perturbed_gas.species()), len(temperatures))) +reaction_delays = np.zeros((len(perturbed_gas.reactions()), len(temperatures))) + +table_dir = os.path.join(working_dir, f'table_{experimental_table_index:04}') +os.makedirs(table_dir, exist_ok=True) + +for i in range(rxn_index_start, min(rxn_index_start + 50, len(perturbed_gas.reactions()))): + print(f'perturbing {i} {perturbed_gas.reactions()[i]}') + # TODO skip the ones that haven't actually been perturbed because PDEP or whatever + try: + a = base_gas.reactions()[i].rate + except AttributeError: + print(f'skipping reaction {i}: {base_gas.reactions()[i]} because it is not perturbed from the base mechanism') + continue + + # load the base gas + base_gas = ct.Solution(base_yaml_path) + # order is not preserved between the mechanisms, so we have to find the reaction that matches + if same_reaction(perturbed_gas.reactions()[i], base_gas.reactions()[i]): + perturbed_index = i + else: + for k in range(0, len(base_gas.reactions())): + if same_reaction(perturbed_gas.reactions()[k], base_gas.reactions()[i]): + perturbed_index = k + break + else: + raise ValueError('Could not find matching reaction in base mechanism') + + base_gas.modify_reaction(i, perturbed_gas.reactions()[perturbed_index]) + + # Run all simulations in parallel + delays = np.zeros(len(temperatures)) + condition_indices = np.arange(0, len(temperatures)) + + with concurrent.futures.ProcessPoolExecutor(max_workers=26) as executor: + for condition_index, delay_time in zip(condition_indices, executor.map( + run_simulation, + [temperatures[j] for j in condition_indices], + [P7[0] for j in condition_indices], + [concentrations[0] for j in condition_indices] + )): + delays[condition_index] = delay_time + reaction_delays[i, :] = delays + +# save the result as a numpy thing +np.save(os.path.join(table_dir, f'reaction_delays_{experimental_table_index:04}_{rxn_index_start:04}.npy'), reaction_delays) \ No newline at end of file
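
The nested loops in the new determine_params_based_on_sa_idt() imply that sa_dict_idt is keyed as sensitivity type -> 'IDT' -> reactor -> equivalence ratio -> pressure -> temperature -> parameter index -> coefficient. A minimal sketch of that assumed layout, plus an illustrative helper (not part of the patch) that ranks parameters by their strongest coefficient across all conditions:

# Nesting implied by determine_params_based_on_sa_idt(); all keys and values below are illustrative.
sa_dict_idt_example = {
    'kinetics': {'IDT': {0: {1.0: {1.0e6: {1000.0: {5: -0.42, 17: 0.31}}}}}},
    'thermo':   {'IDT': {0: {1.0: {1.0e6: {1000.0: {3: 0.12}}}}}},
}


def most_sensitive_parameters(sa_dict_idt: dict, token: str, top_n: int = 10) -> list:
    """Rank parameter indices by the largest |IDT sensitivity coefficient| seen at any condition."""
    best = dict()
    for reactor_data in sa_dict_idt[token]['IDT'].values():
        for phi_data in reactor_data.values():
            for p_data in phi_data.values():
                for idt_data in p_data.values():
                    for index, coeff in idt_data.items():
                        best[index] = max(best.get(index, 0.0), abs(coeff))
    return sorted(best, key=best.get, reverse=True)[:top_n]


print(most_sensitive_parameters(sa_dict_idt_example, token='kinetics', top_n=2))  # [5, 17]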
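
Since self.species and self.reactions appear to be plain dictionaries keyed by the T3 index (the new getters compare every key against the requested index), get_species_by_index() and get_reaction_by_index() could use a direct lookup instead of a linear scan; an equivalent sketch under that assumption:

from typing import Optional

from rmgpy.species import Species


def get_species_by_index(species: dict, index: int) -> Optional[Species]:
    """Direct-lookup equivalent of T3.get_species_by_index(), assuming `species` is keyed by the T3 index."""
    entry = species.get(index)
    return entry['object'] if entry is not None else None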
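
The new get_sa_coefficients() signature accepts top_SA_species, top_SA_reactions, and save_yaml, but the Cantera adapters in this patch still carry a TODO and return the full dictionary. One possible shape for honoring the top-N arguments, sketched as a hypothetical helper (it assumes each parameter maps to a time series of coefficients, as in the constant-HP/TP/UV adapters):

def truncate_sa_dict(sa_dict: dict, top_sa_species: int = 10, top_sa_reactions: int = 10) -> dict:
    """Keep only the top-N most sensitive parameters per observable, ranked by the largest |coefficient|."""
    truncated = {'time': sa_dict.get('time', list()), 'kinetics': dict(), 'thermo': dict()}
    for token, top_n in (('kinetics', top_sa_reactions), ('thermo', top_sa_species)):
        for observable, params in sa_dict[token].items():
            ranked = sorted(params, key=lambda p: max(abs(v) for v in params[p]), reverse=True)
            truncated[token][observable] = {p: params[p] for p in ranked[:top_n]}
    return truncated

An adapter could then end with the same pattern the RMG adapter adopts in this patch: if save_yaml: save_yaml_file(path=self.paths['SA dict'], content=sa_dict).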
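
perturb_species() in rxn_idt.py adds the same absolute increment (delta times the first polynomial's sixth coefficient) to every NASA-7 polynomial of a species; since the sixth coefficient enters the enthalpy as a constant, H(T) shifts uniformly and remains continuous at the polynomial break temperature. A standalone check of that algebra with made-up coefficients:

R = 8.314462618  # J/(mol*K)


def nasa7_enthalpy(coeffs, T):
    """H(T) in J/mol from NASA-7 coefficients [a1..a7]; a6 (index 5) is the enthalpy offset."""
    a1, a2, a3, a4, a5, a6, _ = coeffs
    return R * (a1 * T + a2 * T**2 / 2 + a3 * T**3 / 3 + a4 * T**4 / 4 + a5 * T**5 / 5 + a6)


coeffs = [3.5, 1.0e-3, -2.0e-7, 3.0e-11, -1.0e-15, -1.0e3, 4.0]  # made-up values for illustration
delta = 0.1
increase = delta * coeffs[5]  # same absolute increase for every polynomial, as in perturb_species()
perturbed = coeffs[:5] + [coeffs[5] + increase] + coeffs[6:]

for T in (300.0, 1000.0, 2500.0):
    print(f'{T:7.1f} K: dH = {nasa7_enthalpy(perturbed, T) - nasa7_enthalpy(coeffs, T):.2f} J/mol')
# The shift equals R * increase at every temperature.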
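
perturb_reaction() multiplies each A factor by exp(delta), so ln(k) shifts by exactly delta at every temperature regardless of n and Ea, which is what makes delta directly usable as the normalization of a brute-force sensitivity. A quick check with illustrative Arrhenius parameters:

import numpy as np

R = 8.314462618  # J/(mol*K)


def arrhenius_k(A, n, Ea, T):
    """Modified Arrhenius rate coefficient; Ea in J/mol, T in K."""
    return A * T**n * np.exp(-Ea / (R * T))


A, n, Ea, delta = 1.2e13, 0.5, 1.5e5, 0.1  # illustrative values only
for T in (700.0, 1000.0, 1500.0):
    shift = np.log(arrhenius_k(A * np.exp(delta), n, Ea, T)) - np.log(arrhenius_k(A, n, Ea, T))
    print(f'{T:6.1f} K: d ln(k) = {shift:.6f}')  # equals delta at every temperature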
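
The script stores reaction_delays per table, but the patch does not show the step that turns those ignition delays into the IDT sensitivity coefficients consumed by main.py. One possible post-processing sketch, assuming a corresponding unperturbed delay array is available (the base_delays file below is hypothetical and not produced by this script):

import numpy as np

DELTA = 0.1  # the same ln(k) perturbation used when building perturbed.yaml


def idt_sensitivities(base_delays: np.ndarray, reaction_delays: np.ndarray) -> np.ndarray:
    """
    Brute-force normalized coefficients d ln(IDT) / d ln(k_i) from saved delay arrays.
    base_delays: shape (n_T,); reaction_delays: shape (n_rxns, n_T); zeros mark failed simulations.
    """
    base = np.where(base_delays > 0, base_delays, np.nan)
    pert = np.where(reaction_delays > 0, reaction_delays, np.nan)
    return (np.log(pert) - np.log(base[np.newaxis, :])) / DELTA


# Hypothetical usage with the arrays this script saves:
# rxn_delays = np.load('table_0007/reaction_delays_0007_0000.npy')
# base_delays = np.load('table_0007/base_delays_0007.npy')  # assumed to come from a base-case run
# S = idt_sensitivities(base_delays, rxn_delays)  # S[i, j]: sensitivity of IDT at temperatures[j] to reaction i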