diff --git a/Examples/Ex_Params1.txt b/Examples/Ex_Params1.txt index 33be31b..842e504 100644 --- a/Examples/Ex_Params1.txt +++ b/Examples/Ex_Params1.txt @@ -1,5 +1,6 @@ NumMinerals,4 -CoolingType,Linear +CoolingFile,/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/cooling_file_GELP.txt +CoolingType,Custom WholeRock,12.8 ModelDuration,4.0 StartingTemp,700 diff --git a/Examples/Ex_Params4.txt b/Examples/Ex_Params4.txt index 7c29227..b032360 100644 --- a/Examples/Ex_Params4.txt +++ b/Examples/Ex_Params4.txt @@ -1,5 +1,6 @@ NumMinerals,4 -CoolingType,Linear +CoolingFile,/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/Inverse_InitialSol/Initial00.txt +CoolingType,Custom WholeRock,12.8 ModelDuration,4.0 StartingTemp,700 diff --git a/eiler_SA/__init__.py b/eiler_SA/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/eiler_SA/add_cooling_file_to_params.py b/eiler_SA/add_cooling_file_to_params.py new file mode 100644 index 0000000..985838b --- /dev/null +++ b/eiler_SA/add_cooling_file_to_params.py @@ -0,0 +1,53 @@ +import os + + +"""Once you've made a ton of config files changing mode and length based off a config template add a cooling file to + those parameter files with this script. 
Once you've run this script, run frwrd_bulk_run.py""" + +config_file_locations = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/mode_length_chloe/blockD/' +config_output = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/mode_length_chloe_cooling/blockD/' +if not os.path.exists(config_output): + os.mkdir(config_output) +cooling_file_location = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/cooling_file_GELP.txt' +# cooling_file_startemp = 650.0 +# cooling_file_endtemp = 200.0 +# cooling_file_timestep = 0.0005 + +for file in os.listdir(config_file_locations): + + print('adding a cooling file to config file: {}'.format(file)) + + path = os.path.join(config_file_locations, file) + + lines_to_write = [] + with open(path, 'r') as rfile: + + for line in rfile: + if line.startswith('CoolingFile,'): + line = 'CoolingFile,' + line = '{}{}\n'.format(line, cooling_file_location) + lines_to_write.append(line) + elif line.startswith('CoolingType,'): + type_str = line.split(',')[0] + line = '{},Custom\n'.format(type_str) + lines_to_write.append(line) + # elif line.startswith('ModelDuration'): + # pass + # elif line.startswith('EndTemp'): + # lines_to_write.append('EndTemp,{}\n'.format(cooling_file_endtemp)) + # elif line.startswith('StartingTemp'): + # lines_to_write.append('StartingTemp,{}\n'.format(cooling_file_startemp)) + # elif line.startswith('TimeStep'): + # lines_to_write.append('TimeStep,{}\n'.format(cooling_file_timestep)) + else: + lines_to_write.append(line) + + outpath = os.path.join(config_output, file) + print(lines_to_write) + with open(outpath, 'w') as wfile: + + for line in lines_to_write: + wfile.write(line) + + +print('done!!!') diff --git a/eiler_SA/array_scratch.py b/eiler_SA/array_scratch.py new file mode 100644 index 0000000..975f6f4 --- /dev/null +++ b/eiler_SA/array_scratch.py @@ -0,0 +1,93 @@ +# 
=============================================================================== +# Copyright 2019 Jan Hendrickx and Gabriel Parrish +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =============================================================================== +import os +import numpy as np +import matplotlib.pyplot as plt +# ============= standard library imports ======================== +from eiler_SA.forwardmodeling_sa import make_params_dict, generate_synthetic_data, write_params_file, sample_locator + +#root = '/Users/dcadol/Desktop/academic_docs_II/FGB_model/JohnEiler/plag_hornblende_sensitivity' +root = r'/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/original_eiler/' + +# year in MA we want to see +year_ma = 44 +time_step = 0.0005 + +# convert the year into the index for the y array. 
Subtract one bc the y array starts at index 0 +year_index = int(round(((year_ma / time_step) - 1), 1)) + +print('year index {}'.format(year_index)) + +x_path = os.path.join(root, 'Eiler94_Amphibolite_x.npy') +y_path = os.path.join(root, 'Eiler94_Amphibolite_y.npy') +time_path = os.path.join(root, 'Eiler94_Amphibolite_time.npy') + +param_path = os.path.join(root, 'Eiler94_Amphibolite.txt') +param_dict = make_params_dict(param_path) + +x_arr = np.load(x_path) +y_arr = np.load(y_path) +t_arr = np.load(time_path) + +print('x shape: {}, y shape {}, t shape {}'.format(x_arr.shape, y_arr.shape, t_arr.shape)) + +# quartz +plt.plot(x_arr[:, 0], y_arr[0, year_index, :]) +#plt.show() + +# TODO - construct inverse thingy + +# 1) Start with a known time-temp history - in our case linear +# 2) Run the forward model +# 3) Keep a sample of forward model outputs at late-time +# 4) Add noise to the data (100% - full noise & 20% - low noise) +# 5) Run the inverse solver on the noisy data with an initial time-temp solution + + +# Of the last time slice of the forward model we take data as a txt file, only keeping some points +# | Distance | D18O | Uncertainty | + +x_quartz = x_arr[:, 0] +# print(x_quartz) +y_quartz = y_arr[0, year_index, :] + +x_plag = x_arr[:, 1] +y_plag = y_arr[1, year_index, :] + +x_horn = x_arr[:, 2] +y_horn = y_arr[2, year_index, :] + +x_mag = x_arr[:, 3] +y_mag = y_arr[3, year_index, :] + +quartz_sample_locations = [1500, 1600, 1700, 1800, 1900, 2000] +plag_sample_locations = [2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800, 2900, 3000] +horn_sample_locations = [3500, 3600, 3700, 3800, 3900, 4000] +mag_sample_locations = [400, 500, 600] + +output_location = root + +sample_locs = sample_locator(x_plag) +print('sample locations \n', sample_locs) + +name = 'Eiler94_Amphibolite_plag_lownoise' + +print('x plagioclase', x_plag) +generate_synthetic_data(x_arr=x_plag, y_arr=y_plag, noise=0.01, sample_locations=sample_locs, + output_location=output_location, 
output_name=name) + + + diff --git a/eiler_SA/bulk_sampler.py b/eiler_SA/bulk_sampler.py new file mode 100644 index 0000000..87a55ec --- /dev/null +++ b/eiler_SA/bulk_sampler.py @@ -0,0 +1,114 @@ +# =============================================================================== +# Copyright 2019 Gabriel Kropf and Gabriel Parrish +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =============================================================================== +import os +import numpy as np +# ============= standard library imports ======================== +from eiler_SA.forwardmodeling_sa import make_params_dict, write_params_file +from eiler_SA.forwardmodeling_sa import make_params_dict, generate_synthetic_data, write_params_file + +if __name__ == "__main__": + + file_root = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/modality/modality_fwd_results/' + # # HORNBLENDE + # output_location = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/modality/sa_sampled/hornblende' + # sample_mineral = 'horn' + #PLAG + output_location= '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/modality/sa_sampled/plagioclase' + sample_mineral = 'plag' + xfile_lst = [] + yfile_lst = [] + xname_lst = [] + yname_lst = [] + ### ================ MODALITY ================ + # build Filenames for modality SA + plagioclase_mode_start = 0.1 + hornblende_mode_start = 0.9 + step = 0.1 + div 
= int(max(hornblende_mode_start, plagioclase_mode_start) / step) + print(div) + for i in range(div): + + # subract from plag and and to hornblende to change the relative concentration + delta_mode = float(i * step) + # delta_mode = round(delta_mode, 1) + plag_mode = plagioclase_mode_start + delta_mode + plag_mode = round(plag_mode, 1) + hornb_mode = hornblende_mode_start - delta_mode + hornb_mode = round(hornb_mode, 1) + + vars = ['x', 'y'] + + for v in vars: + print('plag mode: {}, hornb mode {}'.format(plag_mode, hornb_mode)) + filename = 'eiler94_p{}_h{}_{}.npy'.format(str(plag_mode)[-1], str(hornb_mode)[-1], v) + name = 'eiler94_p{}_h{}'.format(str(plag_mode)[-1], str(hornb_mode)[-1]) + # name_lst.append(name) + + if v == 'x': + xarrpath = os.path.join(file_root, filename) + xfile_lst.append(xarrpath) + xname_lst.append(name) + if v == 'y': + yarrpath = os.path.join(file_root, filename) + yfile_lst.append(yarrpath) + yname_lst.append(name) + + print(xfile_lst, '\n', yfile_lst) + + ###=========== SAMPLING ================ + + # year in MA we want to see + year_ma = 44 + time_step = 0.0005 + + # convert the year into the index for the y array. 
Subtract one bc the y array starts at index 0 + year_index = int(round(((year_ma / time_step) - 1), 1)) + + print(len(xfile_lst), len(yfile_lst), len(xname_lst), len(yname_lst)) + + for x_arr_file, y_arr_file, xn, yn in zip(xfile_lst, yfile_lst, xname_lst, yname_lst): + + x_arr = np.load(x_arr_file) + y_arr = np.load(y_arr_file) + + x_quartz = x_arr[:, 0] + # print(x_quartz) + y_quartz = y_arr[0, year_index, :] + + x_plag = x_arr[:, 1] + y_plag = y_arr[1, year_index, :] + + x_horn = x_arr[:, 2] + y_horn = y_arr[2, year_index, :] + + x_mag = x_arr[:, 3] + y_mag = y_arr[3, year_index, :] + + # TODO - Function to generate automatic and consistent plag hornblende sample locations + + plag_sample_locations = [2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800, 2900, 3000] + horn_sample_locations = [3500, 3600, 3700, 3800, 3900, 4000] + + outname = '{}_{}'.format(sample_mineral, xn) + + if sample_mineral == 'plag': + + generate_synthetic_data(x_arr=x_plag, y_arr=y_plag, noise=0.14, sample_locations=plag_sample_locations, + output_location=output_location, output_name=outname) + + elif sample_mineral == 'horn': + generate_synthetic_data(x_arr=x_horn, y_arr=y_horn, noise=0.14, sample_locations=horn_sample_locations, + output_location=output_location, output_name=outname) \ No newline at end of file diff --git a/eiler_SA/bulk_sampler_chloe_AGU.py b/eiler_SA/bulk_sampler_chloe_AGU.py new file mode 100644 index 0000000..315b086 --- /dev/null +++ b/eiler_SA/bulk_sampler_chloe_AGU.py @@ -0,0 +1,79 @@ +import os +import numpy as np +from eiler_SA.forwardmodeling_sa import make_params_dict, write_params_file, sample_locator, generate_synthetic_data + +file_root = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/modality/modality_len_results_fwd/blockA' + +output_root = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/modality/modality_len_results_fwd_sampled/blockA/' +if not os.path.exists(output_root): + 
os.mkdir(output_root) + +y_files = [] +x_files = [] +time_files = [] +xname_lst = [] +yname_lst = [] +for i in os.listdir(file_root): + + p = os.path.join(file_root, i) + n = i.split('.')[0] + if i.endswith('_x.npy'): + x_files.append(p) + xname_lst.append(n) + elif i.endswith('_y.npy'): + y_files.append(p) + yname_lst.append(n) + elif i.endswith('_time.npy'): + time_files.append(p) + # time_name_lsit + +yfile_lst = sorted(y_files) +yname_lst = sorted(yname_lst) +xfile_lst = sorted(x_files) +xname_lst = sorted(xname_lst) +time_files = sorted(time_files) + +print(yfile_lst) +print(yname_lst) +print(xfile_lst) +print(xname_lst) + + +###=========== SAMPLING ================ + +# year in MA we want to see +year_ma = 44 +time_step = 0.0005 + +# convert the year into the index for the y array. Subtract one bc the y array starts at index 0 +year_index = int(round(((year_ma / time_step) - 1), 1)) + +print(len(xfile_lst), len(yfile_lst), len(xname_lst), len(yname_lst)) + +for x_arr_file, y_arr_file, xn, yn in zip(xfile_lst, yfile_lst, xname_lst, yname_lst): + + x_arr = np.load(x_arr_file) + y_arr = np.load(y_arr_file) + + x_quartz = x_arr[:, 0] + # print(x_quartz) + y_quartz = y_arr[0, year_index, :] + + x_plag = x_arr[:, 1] + y_plag = y_arr[1, year_index, :] + + x_horn = x_arr[:, 2] + y_horn = y_arr[2, year_index, :] + + x_mag = x_arr[:, 3] + y_mag = y_arr[3, year_index, :] + + plag_sample_locations = sample_locator(x_plag) + horn_sample_locations = sample_locator(x_horn) + + outname = '{}_{}'.format('plag', xn) + generate_synthetic_data(x_arr=x_plag, y_arr=y_plag, noise=0.14, sample_locations=plag_sample_locations, + output_location=output_root, output_name=outname) + outname = '{}_{}'.format('horn', xn) + generate_synthetic_data(x_arr=x_horn, y_arr=y_horn, noise=0.14, sample_locations=horn_sample_locations, + output_location=output_root, output_name=outname) \ No newline at end of file diff --git a/eiler_SA/bulk_sampler_len_modal.py b/eiler_SA/bulk_sampler_len_modal.py 
new file mode 100644 index 0000000..d988954 --- /dev/null +++ b/eiler_SA/bulk_sampler_len_modal.py @@ -0,0 +1,115 @@ +import os +import numpy as np +from eiler_SA.forwardmodeling_sa import make_params_dict, write_params_file, sample_locator, generate_synthetic_data + +file_root = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/modality/modality_len_results_fwd/' +# HORNBLENDE +horn_output_location = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/full_sampled/hornblende' +# sample_mineral = 'horn' +# PLAG +plag_output_location= '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/full_sampled/plagioclase' +# sample_mineral = 'plag' +# 0.3 - base case + +plagioclase_mode_start = 0.1 +# 0.6 - base case +hornblende_mode_start = 0.9 + +plag_length_start = 200.0 +horn_length_start = 500.0 + +lengthstep = 100.0 +# this will be the step by which we change the relative mode of plag to hornblende +step = 0.1 + +leng_div = int((max(horn_length_start, plag_length_start) - min(horn_length_start, plag_length_start))/lengthstep) +div = int(max(hornblende_mode_start, plagioclase_mode_start) / step) +print(div) + + +xfile_lst = [] +yfile_lst = [] +xname_lst = [] +yname_lst = [] + +for j in range(leng_div + 1): + + delta_len = float(j * lengthstep) + + plag_len = plag_length_start + delta_len + horn_len = horn_length_start - delta_len + + # for each length, change the modality around + for i in range(div): + # print(i) + + # subract from plag and and to hornblende to change the relative concentration + delta_mode = float(i * step) + # delta_mode = round(delta_mode, 1) + plag_mode = plagioclase_mode_start + delta_mode + plag_mode = round(plag_mode, 1) + hornb_mode = hornblende_mode_start - delta_mode + hornb_mode = round(hornb_mode, 1) + + print('plag mode: {}, hornb mode {}'.format(plag_mode, hornb_mode)) + + filename = 'eiler94_p{}_h{}_pl{}_hl{}'.format(str(plag_mode)[-1], 
str(hornb_mode)[-1], int(plag_len), int(horn_len)) + + vars = ['x', 'y'] + + for v in vars: + # print('plag mode: {}, hornb mode {}'.format(plag_mode, hornb_mode)) + filename = 'eiler94_p{}_h{}_pl{}_hl{}_{}.npy'.format(str(plag_mode)[-1], str(hornb_mode)[-1], int(plag_len), int(horn_len), v) + print(filename) + name = filename.split('.')[0] + # name_lst.append(name) + + if v == 'x': + xarrpath = os.path.join(file_root, filename) + xfile_lst.append(xarrpath) + xname_lst.append(name) + if v == 'y': + yarrpath = os.path.join(file_root, filename) + yfile_lst.append(yarrpath) + yname_lst.append(name) + + print(xfile_lst, '\n', yfile_lst) + +###=========== SAMPLING ================ + +# year in MA we want to see +year_ma = 44 +time_step = 0.0005 + +# convert the year into the index for the y array. Subtract one bc the y array starts at index 0 +year_index = int(round(((year_ma / time_step) - 1), 1)) + +print(len(xfile_lst), len(yfile_lst), len(xname_lst), len(yname_lst)) + +for x_arr_file, y_arr_file, xn, yn in zip(xfile_lst, yfile_lst, xname_lst, yname_lst): + + x_arr = np.load(x_arr_file) + y_arr = np.load(y_arr_file) + + x_quartz = x_arr[:, 0] + # print(x_quartz) + y_quartz = y_arr[0, year_index, :] + + x_plag = x_arr[:, 1] + y_plag = y_arr[1, year_index, :] + + x_horn = x_arr[:, 2] + y_horn = y_arr[2, year_index, :] + + x_mag = x_arr[:, 3] + y_mag = y_arr[3, year_index, :] + + plag_sample_locations = sample_locator(x_plag) + horn_sample_locations = sample_locator(x_horn) + + outname = '{}_{}'.format('plag', xn) + generate_synthetic_data(x_arr=x_plag, y_arr=y_plag, noise=0.14, sample_locations=plag_sample_locations, + output_location=plag_output_location, output_name=outname) + outname = '{}_{}'.format('horn', xn) + generate_synthetic_data(x_arr=x_horn, y_arr=y_horn, noise=0.14, sample_locations=horn_sample_locations, + output_location=horn_output_location, output_name=outname) \ No newline at end of file diff --git a/eiler_SA/check arrs.py b/eiler_SA/check arrs.py 
new file mode 100644 index 0000000..4d1038c --- /dev/null +++ b/eiler_SA/check arrs.py @@ -0,0 +1,17 @@ +import numpy as np +import os + +root = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/modality/modality_len_results_fwd/' +rname = 'eiler94_p8_h2_pl400_hl300' +param_root = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/mode_length_configs_cooling/' + +numpy_xarr = os.path.join(root, '{}_x.npy'.format(rname)) +numpy_xarr = np.load(numpy_xarr) +numpy_yarr = os.path.join(root, '{}_y.npy'.format(rname)) +numpy_yarr = np.load(numpy_yarr) +numpy_tarr = os.path.join(root, '{}_time.npy'.format(rname)) +numpy_tarr = np.load(numpy_tarr) + +print('np x \n', numpy_xarr, '\n np y \n', numpy_yarr, '\nnp t \n', numpy_tarr) + +print(len(numpy_xarr), len(numpy_yarr), len(numpy_tarr)) \ No newline at end of file diff --git a/eiler_SA/forwardmodeling_bulkplot.py b/eiler_SA/forwardmodeling_bulkplot.py new file mode 100644 index 0000000..a2f22a6 --- /dev/null +++ b/eiler_SA/forwardmodeling_bulkplot.py @@ -0,0 +1,105 @@ +# =============================================================================== +# Copyright 2019 Jan Hendrickx and Gabriel Parrish +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# =============================================================================== +import os +import numpy as np +import matplotlib.pyplot as plt +# ============= standard library imports ======================== +from eiler_SA.forwardmodeling_sa import make_params_dict, generate_synthetic_data, write_params_file + + +# root = '/Users/dcadol/Desktop/academic_docs_II/FGB_model/JohnEiler/plag_hornblende_sensitivity' +# root = '/home/gabriel/Documents/FGB_model/JohnEiler/modality_SF/plag_hornblende_sensitivity_SF/' +root = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/modality/modality_len_results_fwd/' +rname = 'eiler94_p8_h2_pl400_hl300' +param_root = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/mode_length_configs_cooling/' + +# years in MA we want to see +year_ma = [10, 20, 45] +time_step = 0.0005 + +# convert the year into the index for the y array. Subtract one bc the y array starts at index 0 +yindex_lst = [] +for y in year_ma: + + # y-1 accounts for the fact that the year is in order of a python index, so starts at 0 and not 1 like time + y_index = int(round((((y - 1) / time_step) - 1), 1)) + yindex_lst.append(y_index) + +print('year index {}'.format(yindex_lst)) + +x_path = os.path.join(root, '{}_x.npy'.format(rname)) +y_path = os.path.join(root, '{}_y.npy'.format(rname)) +time_path = os.path.join(root, '{}_time.npy'.format(rname)) + +param_path = os.path.join(param_root, '{}.txt'.format(rname)) +param_dict = make_params_dict(param_path) + +x_arr = np.load(x_path) +y_arr = np.load(y_path) +t_arr = np.load(time_path) + +print('x shape: {}, y shape {}, t shape {}'.format(x_arr.shape, y_arr.shape, t_arr.shape)) + +# quartz +fig, ax = plt.subplots() +for year_index, y in zip(yindex_lst, year_ma): + ax.plot(x_arr[:, 0], y_arr[0, year_index, :], label='{} million years'.format(y)) +ax.set_title('Plot of Oxygen Isotope Ratios for Quartz - {}'.format(rname)) +ax.set_ylabel('Delta 18-O') 
+ax.set_xlabel('x (mm)') +plt.legend(loc='best') +plt.grid(True) +plt.savefig(os.path.join(root, 'quartz_{}.png'.format(rname))) +plt.show() + + +# Plag +fig, ax = plt.subplots() +for year_index, y in zip(yindex_lst, year_ma): + ax.plot(x_arr[:, 1], y_arr[1, year_index, :], label='{} million years'.format(y)) +ax.set_title('Plot of Oxygen Isotope Ratios for Plagioclase - {}'.format(rname)) +ax.set_ylabel('Delta 18-O') +ax.set_xlabel('x (mm)') +plt.legend(loc='best') +plt.grid(True) +plt.savefig(os.path.join(root, 'plag_{}.png'.format(rname))) +plt.show() + +# Hornblende +fig, ax = plt.subplots() +for year_index, y in zip(yindex_lst, year_ma): + ax.plot(x_arr[:, 2], y_arr[2, year_index, :], label='{} million years'.format(y)) +ax.set_title('Plot of Oxygen Isotope Ratios for Hornblende - {}'.format(rname)) +ax.set_ylabel('Delta 18-O') +ax.set_xlabel('x (mm)') +plt.legend(loc='best') +plt.grid(True) +plt.savefig(os.path.join(root, 'hornblende_{}.png'.format(rname))) +plt.show() + +# titanite +fig, ax = plt.subplots() +for year_index, y in zip(yindex_lst, year_ma): + ax.plot(x_arr[:, 3], y_arr[3, year_index, :], label='{} million years'.format(y)) +ax.set_title('Plot of Oxygen Isotope Ratios for Titanite - {}'.format(rname)) +ax.set_ylabel('Delta 18-O') +ax.set_xlabel('x (mm)') +plt.legend(loc='best') +plt.grid(True) +plt.savefig(os.path.join(root, 'titanite_{}.png'.format(rname))) +plt.show() + + diff --git a/eiler_SA/forwardmodeling_sa.py b/eiler_SA/forwardmodeling_sa.py new file mode 100644 index 0000000..bc71eed --- /dev/null +++ b/eiler_SA/forwardmodeling_sa.py @@ -0,0 +1,415 @@ +# =============================================================================== +# Copyright 2019 Gabriel Kropf and Gabriel Parrish +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =============================================================================== +from numpy import * +from tkinter import ttk +import pandas +from scipy import interpolate +from scipy import optimize +import matplotlib.pyplot as plt +import os +# ============= standard library imports ======================== +from modelfunctions import forwardmodel_fast, forwardmodel_slow, find_inverses + +def forward_model_slow_bulk(params, coolfile=False): + + """ + A forward model implementation that accepts a dictionary params based on a standard params .txt file + :param params: Dictionary + :return: 3 arrays of the model results + """ + + print('running forward model slow bulk for params file: {}'.format(params)) + + def fluxbal(z, e, f, h, j, k, l): + X = zeros([z]) + A = zeros([z, z]) + for m in range(0, z - 1): + A[m, 0] = 1 + A[m, m + 1] = -1 + A[z - 1, :] = j * k * l * f + B = zeros([z]) + B[0:z - 1] = h[1:z] + B[z - 1] = sum(j * k * l * f * e) + X = linalg.solve(A, B) + return X + + # get user defined info TODO - remove .get() method + if not coolfile: + ttot = float(params['ModelDuration']) + dt = float(params['TimeStep']) + WRd180 = float(params['WholeRock']) + Tstart = float(params['StartingTemp']) + Tend = float(params['EndTemp']) + nmin = int(params['NumMinerals']) + de = 100 + else: + WRd180 = float(params['WholeRock']) + Tstart = float(params['StartingTemp']) + Tend = float(params['EndTemp']) + dt = float(params['TimeStep']) + nmin = int(params['NumMinerals']) + de = 100 + + cool_file = params['CoolingFile'] + print('cool file is 
{}'.format(cool_file)) + if params['CoolingType'] == "Custom": + print('custom cooling file!') + # read data in as matrix without using pandas + file = open(cool_file, 'r', encoding='ISO-8859-1') + raw = file.read() + raw_lines = raw.split('\n') + raw_data = [x.split(',') for x in raw_lines[0:-1]] + # raw_data = [x.split(' ') for x in raw_lines[0:-1]] + segs = array([[float(x) for x in y] for y in raw_data]) + + # compute cooling steps + [rw, cl] = segs.shape; + segtimes = divide(segs[:, 0], dt) + segtimes = [int(round(x)) for x in segtimes] + SegDTdt = [] + for p in range(0, rw): + thisseg = ones(segtimes[p]) * segs[p][1] + SegDTdt = concatenate((SegDTdt, thisseg)) + tend = sum(segtimes); + ttot = tend * dt; + + # unit definitions and converions + deltat = dt * 3.1536e+13 + tend = math.ceil(ttot / dt) + Tstart = Tstart + 273 + Tend = Tend + 273 + T0 = Tstart + T = Tstart + + # initialize storage matrices + mode = zeros([nmin]) + shape = zeros([nmin]).astype(int) + L = zeros([nmin]) + w = zeros([nmin]) + r = zeros([nmin]) + SA = zeros([nmin]) + dx = zeros([nmin]) + gb = zeros([nmin]) + d180 = zeros([nmin]) + Afac = zeros([nmin]) + Bfac = zeros([nmin]) + Cfac = zeros([nmin]) + D0 = zeros([nmin]) + Q = zeros([nmin]) + D = zeros([nmin]) + fracfax = zeros([nmin]) + oxcon = zeros([nmin]) + R = 8.3144621 # J/K*ml + + ## get all rock properties + + for j in range(0, nmin): + mode[j] = float(params['Min' + str(j) + '-Mode']) + if params['Min' + str(j) + '-Shape'] == "Spherical": + shape[j] = 1 + if params['Min' + str(j) + '-Shape'] == "Slab": + shape[j] = 2 + r[j] = params['Min' + str(j) + '-R'] + L[j] = 2 * r[j] + w[j] = params['Min' + str(j) + '-W'] + dx[j] = r[j] / de + gb[j] = math.ceil(L[j] / dx[j]) + Afac[j] = params['Min' + str(j) + '-Afrac'] + Bfac[j] = params['Min' + str(j) + '-Bfrac'] + Cfac[j] = params['Min' + str(j) + '-Cfrac'] + D0[j] = params['Min' + str(j) + '-Dparam1'] + Q[j] = params['Min' + str(j) + '-Dparam2'] + oxcon[j] = params['Min' + str(j) + 
'-Oxcon'] + d180[j] = 99 + + # convert input to micron + L = L * 1e-4 + w = w * 1e-4 + r = r * 1e-4 + dx = dx * 1e-4 + maxdim = max(gb) + # normalize mineral modes + mode = mode.copy() / sum(mode) + + # caclculate mineral surface area + for m in range(0, nmin): + if shape[m] == 1: + SA[m] = (4 * pi * pow(r[m], 2)) + else: + SA[m] = 2 * L[m] * w[m] + + # initial conditions (starting concentration profiles) + for m in range(0, nmin): + fracfax[m] = Afac[m] + ((Bfac[m] * 1e3) / T0) + ((Cfac[m] * 1e6) / pow(T0, 2)) + + # recalculate estimated whole rock based on disequilibrium phase (only works + # for one diseq phase in this formulation; preferrably a low-volume/accessory phase) + for m in range(0, nmin): + if d180[m] < 99: + WRd180 = WRd180 * (1 - mode[m]) + d180[m] * mode + + d180mon = WRd180 + dot(mode, fracfax) + gbvalinit = zeros([nmin]) + for m in range(0, nmin): + gbvalinit[m] = d180mon - fracfax[m] + Told = zeros([nmin, int(max(gb))]) + for m in range(0, nmin): + if d180[m] == 99: + Told[m, 0:int(gb[m])] = gbvalinit[m] + else: + Told[m, 0:int(gb[m])] = d180[m] + + #### Solve fully implicit + #####define data storage matrices + time = zeros([tend]) + Temphx = zeros([tend]) + Tnew = zeros([nmin, int(max(gb))]) + pregbval = zeros([nmin]) + result = zeros([nmin, tend, int(maxdim)]) # array for storing results for all + + # loadingbar.config(maximum=int(tend / 10)) + for t in range(0, int(tend)): + + # if (t % 10 == 0): + # loadingbar.step() + # mainapp.update() + if params['CoolingType'] == "Linear": + DTdt = (Tstart - Tend) / ttot # linear in t + T = T0 - (DTdt * (t + 1) * dt) + elif params['CoolingType'] == "Inverse": + k = ttot / ((1 / Tend) - (1 / Tstart)) + T = 1 / ((((t + 1) * dt) / k) + (1 / Tstart)) + else: + # print('SegDTdt[t]') + # SegDTdt = [] + DTdt = SegDTdt[t]; + T = T - (DTdt * dt); + + D = D0 * exp(-Q / (R * T)) + fracfax = Afac + Bfac * (1e3 / T) + Cfac * (1e6 / pow(T, 2)) + coeff = D / dx + + for m in range(0, nmin): + if shape[m] == 1: # 
spherical/isotropic diffusion geometry + gb[m] = math.ceil(r[m] / dx[m]) + 1 + a = ones([int(gb[m])]) + a[int(gb[m]) - 1] = 2 + b = (-2 - ((dx[m] * dx[m])) / (D[m] * deltat)) * ones([int(gb[m])]) + c = ones([int(gb[m])]) + c[0] = 2 + d = -((dx[m] * dx[m]) / (D[m] * deltat)) * Told[m, 0:int(gb[m])] + for i in range(1, int(gb[m]) - 1): + a[i] = (i - 1) / i + c[i] = (i + 1) / i + else: # slab/infinite plane diffusion geometry + a = ones([int(gb[m])]) + a[int(gb[m]) - 1] = 2 + b = (-2 - ((dx[m] * dx[m])) / (D[m] * deltat)) * ones([int(gb[m])]) + c = ones([int(gb[m])]) + c[0] = 2 + d = -((dx[m] * dx[m]) / (D[m] * deltat)) * Told[m, 0:int(gb[m])] + TD = diag(b) + diag(a[1:], -1) + diag(c[0:-1], 1) + Tnew[m, 0:int(gb[m])] = linalg.solve(TD, d) + pregbval[m] = Tnew[m, int(gb[m]) - 1] + + gbval = fluxbal(nmin, pregbval, coeff, fracfax, mode, SA, oxcon) + for m in range(0, nmin): + Tnew[m, int(gb[m]) - 1] = gbval[m] + if shape[m] == 2: + Tnew[m, 1] = gbval[m] + Told[m, :] = Tnew[m, :] + time[t] = t * dt + Temphx[t] = T + result[m, t, 0:int(gb[m])] = Told[m, 0:int(gb[m])] + + # create global variables to store results in + # loadingbar.destroy() + global yresult, timeresult, xresult + yresult = result + xsteps = yresult.shape[2] + xresult = zeros((xsteps, nmin)) + timeresult = linspace(0, 1, yresult.shape[1]) * ttot + for i in range(0, nmin): + if shape[i] == 1: + xresult[:, i] = 1e4 * linspace(dx[i], 2 * r[i] - dx[i], xsteps) + onesidey = result[i, :, 0:int(xsteps / 2)] + yresult[i, :, 0:2 * int(xsteps / 2)] = concatenate((onesidey[:, ::-1], onesidey), axis=1) + else: + xresult[:, i] = 1e4 * linspace(dx[i], L[i] - dx[i], xsteps) + + # print(systime.time() - start_time) + # print(array(xresult).shape) + # print(array(yresult).shape) + # print(array(timeresult).shape) + + return xresult, yresult, timeresult + +def write_params_file(params_dict, output_path, filename): + """ + The script outputs a .txt parameter file that can be read by make_params_dict() function + :param 
def write_params_file(params_dict, output_path, filename):
    """
    Write a parameter dict back out as a DiffusionSolver 'key,value' text file.

    :param params_dict: dictionary of standard DiffusionSolver parameters
    :param output_path: directory the file should go to
    :param filename: name for the .txt file (without the extension)
    :return: None
    """
    # todo - make this ordered rather than dict based
    wpath = os.path.join(output_path, '{}.txt'.format(filename))

    with open(wpath, 'w') as wfile:
        for k, v in params_dict.items():
            wfile.write('{},{}\n'.format(k, v))


def make_params_dict(params):
    """
    Parse a standard DiffusionSolver parameter file into a dict.

    :param params: path to a comma-separated ``key,value`` .txt file such as is
        output by the "save parameters" function in DiffusionSolver
    :return: dict mapping parameter name (str) -> raw value (str); values are
        left as strings, callers cast to float/int as needed
    """
    fwd_model_parameters = {}

    with open(params, 'r') as rfile:
        for line in rfile:
            # BUG FIX: the original stripped the trailing character with
            # [0:-1], which eats a real character when the last line has no
            # newline; strip only the newline instead.
            line = line.rstrip('\n')
            if not line:
                continue  # tolerate blank lines / trailing newline
            # partition keeps values that legitimately contain commas (the
            # original split(',')[1] silently truncated them).
            key, _, value = line.partition(',')
            fwd_model_parameters[key] = value

    return fwd_model_parameters


def find_nearest(array, value):
    """
    Get the index of the element of ``array`` nearest to ``value``.

    :param array: array-like of numbers
    :param value: target number
    :return: int index of the closest element
    """
    array = asarray(array)
    idx = (abs(array - value)).argmin()
    return idx


def generate_synthetic_data(x_arr, y_arr, noise, sample_locations, output_location, output_name):
    """
    Sample a modeled profile at the given locations, add Gaussian noise, and
    write a 'distance value uncertainty' text file.

    :param x_arr: numpy array of distances across the lattice
    :param y_arr: numpy array of modeled values from the last timestep
    :param noise: 1-sigma noise level, same units as y_arr (e.g. 0.28 permil
        analytical uncertainty)
    :param sample_locations: list of distances from the end of the crystal at
        which to grab samples
    :param output_location: directory for the output file
    :param output_name: basename for the output file
    :return: None (writes <output_name>_sampled_noisy.txt)
    """
    print(x_arr.shape)
    print(y_arr.shape)

    # Grab the model point nearest each requested sample location.
    yvals = []
    xvals = []
    for val in sample_locations:
        min_indx = find_nearest(x_arr, val)
        yvals.append(y_arr[min_indx])
        xvals.append(x_arr[min_indx])

    yvals = array(yvals)
    xvals = array(xvals)

    # Add noise ~ N(0, noise), one draw per sample.
    noise_arr = random.normal(0.0, noise, len(yvals))
    yvals_noise = yvals + noise_arr

    # Reported 1-sigma uncertainty for every sample.
    uncertainty = full(noise_arr.shape, noise)

    wfilepath = os.path.join(output_location, '{}_sampled_noisy.txt'.format(output_name))

    with open(wfilepath, 'w') as wfile:
        # BUG FIX: the original zipped the clean yvals here, so the
        # '_sampled_noisy' file never actually contained the noise.
        for d, o, un in zip(xvals, yvals_noise, uncertainty):
            wfile.write('{} {} {}\n'.format(d, o, un))

    print('done writing to {}'.format(wfilepath))


def sample_locator(x_arr):
    """
    Pick sample locations from the interior of the profile.

    :param x_arr: distances along the crystal lattice (assumed increasing)
    :return: list of sample locations; the two endpoints are excluded
    """
    x_arr = [i for i in x_arr]
    sample_section = x_arr[1:-1]

    sample_locations = []

    for i in range(len(sample_section)):

        if i < len(sample_section):  # NOTE(review): always true; kept as-is
            if i == 0:
                sample_locations.append(sample_section[i])
            if i + 1 < len(sample_section):

                # NOTE(review): this stops sampling entirely at the first gap
                # < 10, and the elif appends point i even though the inline
                # comment said "sample i+1" - confirm intent before changing.
                if sample_section[i + 1] - sample_section[i] < 10.0:
                    break
                elif sample_section[i + 1] - sample_section[i] <= 20.0 and i != 0:
                    # sample i+1
                    sample_locations.append(sample_section[i])
    return sample_locations


if __name__ == "__main__":

    # path to the parameter style file
    chloe_model_params = r'/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/original_eiler/Eiler94_Amphibolite.txt'

    fwd_model_parameters = make_params_dict(params=chloe_model_params)

    xresult, yresult, timeresult = forward_model_slow_bulk(fwd_model_parameters)

    print(array(xresult).shape)
    print(array(yresult).shape)
    print(array(timeresult).shape)
+ # TODO - Find a way to store these and then do the visualizations... + # For now just store them as .npy arrays + filename = os.path.split(chloe_model_params)[1] + output = os.path.split(chloe_model_params)[0] + save_tag = filename.split('.')[0] + + # Save each array as a .npy file + save(os.path.join(output, '{}_x.npy'.format(save_tag)), xresult) + save(os.path.join(output, '{}_y.npy'.format(save_tag)), yresult) + save(os.path.join(output, '{}_time.npy'.format(save_tag)), timeresult) + diff --git a/eiler_SA/frwrd_bulk_run.py b/eiler_SA/frwrd_bulk_run.py new file mode 100644 index 0000000..d390041 --- /dev/null +++ b/eiler_SA/frwrd_bulk_run.py @@ -0,0 +1,59 @@ +# =============================================================================== +# Copyright 2019 Jan Hendrickx and Gabriel Parrish +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =============================================================================== +import os +from numpy import * +# ============= standard library imports ======================== +from eiler_SA.forwardmodeling_sa import make_params_dict, forward_model_slow_bulk + +""" +A script to run the model over a series of files with differing parameters from Eiler 94 for sensitivity analysis. 
+The arrays from the forward model will be saved w naming conventions based on the input params .txt filename +""" + +if __name__ == "__main__": + + # root path + #root = '/Users/dcadol/Desktop/academic_docs_II/FGB_model/JohnEiler/plag_hornblende_sensitivity' + # root = '/home/dan/Documents/Eiler_94/plag_hornblende_sensitivity' + # root = '/home/gabriel/Documents/Euler_SA/euler_modality' + # root = '/home/gabriel/Documents/FGB_model/JohnEiler/modality_SF/' + root = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/mode_length_chloe_cooling/blockD/' + outroot = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/modality/modality_len_results_fwd/blockD/' + if not os.path.exists(outroot): + os.mkdir(outroot) + # root = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/mode_length_configs/' + # outroot = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/modality/modality_fwd_results/' + # root = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/mode_length_configs/' + + for dir in os.listdir(root): + print('dir', dir) + param_path = os.path.join(root, dir) + print('path', param_path) + + save_name = dir.split('.')[0] + + fwd_model_params = make_params_dict(params=param_path) + + print('running the slow model with a cooling file') + xresult, yresult, timeresult = forward_model_slow_bulk(fwd_model_params, coolfile=True) + + # print('running the slow model with no cooling file') + # xresult, yresult, timeresult = forward_model_slow_bulk(fwd_model_params) + + # Save each array as a .npy file + save(os.path.join(outroot, '{}_x.npy'.format(save_name)), xresult) + save(os.path.join(outroot, '{}_y.npy'.format(save_name)), yresult) + save(os.path.join(outroot, '{}_time.npy'.format(save_name)), timeresult) \ No newline at end of file diff --git a/eiler_SA/full_params.py b/eiler_SA/full_params.py new 
file mode 100644 index 0000000..f6a5915 --- /dev/null +++ b/eiler_SA/full_params.py @@ -0,0 +1,72 @@ +import os +from eiler_SA.forwardmodeling_sa import make_params_dict, write_params_file + +chloe_model_params = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/original_eiler/Eiler94_Amphibolite.txt' + +output = os.path.split(chloe_model_params)[0] + +output = os.path.join(output, 'mode_length_temp') + +fwd_model_parameters = make_params_dict(chloe_model_params) + +print(fwd_model_parameters) +# +# # TODO - first change the parameters of the plag vs hornblende modalities +print(fwd_model_parameters['Min1-Mode']) + +# 0.3 - base case +plag_mode_key = 'Min1-Mode' +plagioclase_mode = float(fwd_model_parameters[plag_mode_key]) +plagioclase_mode_start = 0.1 +# 0.6 - base case +hornb_mode_key = 'Min2-Mode' +hornblende_mode = float(fwd_model_parameters[hornb_mode_key]) +hornblende_mode_start = 0.8 + +plag_length_key = 'Min1-W' +plag_length = float(fwd_model_parameters[plag_length_key]) +plag_length_start = 200.0 + +horn_length_key = 'Min2-R' +horn_length = float(fwd_model_parameters[horn_length_key]) +horn_length_start = 10000.0 + +lengthstep = 100.0 +# this will be the step by which we change the relative mode of plag to hornblende +step = 0.05 + +leng_div = int((max(horn_length_start, plag_length_start) - min(horn_length_start, plag_length_start))/lengthstep) + +div = int(max(hornblende_mode_start, plagioclase_mode_start) / step) +print(div) + +for j in range(leng_div + 1): + + delta_len = float(j * lengthstep) + + plag_len = plag_length_start + delta_len + horn_len = horn_length_start - delta_len + + # for each length, change the modality around + for i in range(div): + # print(i) + + # subract from plag and and to hornblende to change the relative concentration + delta_mode = float(i * step) + # delta_mode = round(delta_mode, 1) + plag_mode = plagioclase_mode_start + delta_mode + plag_mode = round(plag_mode, 1) + hornb_mode = 
hornblende_mode_start - delta_mode + hornb_mode = round(hornb_mode, 1) + + print('plag mode: {}, hornb mode {}'.format(plag_mode, hornb_mode)) + + # at each step in sensitivity set the new mode value (as a string) in the parameters to the new values + fwd_model_parameters[plag_mode_key] = str(plag_mode) + fwd_model_parameters[hornb_mode_key] = str(hornb_mode) + fwd_model_parameters[horn_length_key] = str(horn_len) + fwd_model_parameters[plag_length_key] = str(plag_len) + + # now write to file + filename = 'eiler94_p{}_h{}_pl{}_hl{}'.format(str(plag_mode)[-1], str(hornb_mode)[-1], int(plag_len), int(horn_len)) + write_params_file(fwd_model_parameters, output, filename) \ No newline at end of file diff --git a/eiler_SA/fwrd_model_test.py b/eiler_SA/fwrd_model_test.py new file mode 100644 index 0000000..5b18f29 --- /dev/null +++ b/eiler_SA/fwrd_model_test.py @@ -0,0 +1,22 @@ +import os +from eiler_SA.forwardmodeling_sa import make_params_dict, forward_model_slow_bulk +# +# param_path = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/Examples/Ex_Params4.txt' +# fwd_model_params = make_params_dict(params=param_path) +# +# print('running the slow model with a cooling file') +# xresult, yresult, timeresult = forward_model_slow_bulk(fwd_model_params, coolfile=True) + +# param_path = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/mode_length_configs/eiler94_p1_h9_pl200_hl500.txt' +# fwd_model_params = make_params_dict(params=param_path) +# print(fwd_model_params) +# # xresult, yresult, timeresult = forward_model_slow_bulk(fwd_model_params, coolfile=False) +# +# # print(len(xresult), len(yresult), len(timeresult)) + +param_path = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/mode_length_chloe_cooling/blockA/eiler94_p10_h80_pl1500_hl2000.txt' +fwd_model_params = make_params_dict(params=param_path) +print(fwd_model_params) +xresult, yresult, timeresult = 
forward_model_slow_bulk(fwd_model_params, coolfile=True) + +print(len(xresult), len(yresult), len(timeresult)) \ No newline at end of file diff --git a/eiler_SA/parameter_generator.py b/eiler_SA/parameter_generator.py new file mode 100644 index 0000000..17544ae --- /dev/null +++ b/eiler_SA/parameter_generator.py @@ -0,0 +1,72 @@ +# =============================================================================== +# Copyright 2019 Gabriel Kropf and Gabriel Parrish +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ===============================================================================
import os
# ============= standard library imports ========================
from eiler_SA.forwardmodeling_sa import make_params_dict, write_params_file

if __name__ == "__main__":

    # Template Eiler 94 amphibolite parameter file to perturb.
    template_path = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/original_eiler/Eiler94_Amphibolite.txt'

    # Generated configs land in a 'modality_temp' folder beside the template.
    output = os.path.join(os.path.split(template_path)[0], 'modality_temp')

    fwd_model_parameters = make_params_dict(template_path)

    print(fwd_model_parameters)
    # TODO - first change the parameters of the plag vs hornblende modalities
    print(fwd_model_parameters['Min1-Mode'])

    # Modal-fraction keys (template base case: plagioclase 0.3, hornblende 0.6).
    plag_key = 'Min1-Mode'
    hornb_key = 'Min2-Mode'
    # float() doubles as a sanity check that the template values are numeric.
    float(fwd_model_parameters[plag_key])
    float(fwd_model_parameters[hornb_key])

    # Sweep endpoints and increment: plagioclase climbs from 0.1 while
    # hornblende drops from 0.8, so their combined fraction stays fixed.
    plag_start = 0.1
    hornb_start = 0.8
    mode_step = 0.1

    n_steps = int(max(hornb_start, plag_start) / mode_step)
    print(n_steps)
    for k in range(n_steps):
        shift = float(k * mode_step)
        plag_val = round(plag_start + shift, 1)
        hornb_val = round(hornb_start - shift, 1)

        print('plag mode: {}, hornb mode {}'.format(plag_val, hornb_val))

        # Write the perturbed modes back (as strings, matching the file format).
        fwd_model_parameters[plag_key] = str(plag_val)
        fwd_model_parameters[hornb_key] = str(hornb_val)

        # e.g. plag 0.4 / hornblende 0.5 -> 'eiler94_p4_h5'
        filename = 'eiler94_p{}_h{}'.format(str(plag_val)[-1], str(hornb_val)[-1])
write_params_file(fwd_model_parameters, output, filename) + + + + # # TODO - then change the sizes of the plag and hornblende crystals in another test. \ No newline at end of file diff --git a/eiler_SA/solution_plotter.py b/eiler_SA/solution_plotter.py new file mode 100644 index 0000000..0141a38 --- /dev/null +++ b/eiler_SA/solution_plotter.py @@ -0,0 +1,31 @@ +import os +import pandas as pd +import matplotlib.pyplot as plt + +root = 'file:///home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/Output/yyy' + +file1 = 'initial_eiler_94_2_results_in_this_sol.csv' + +file2 = 'initial_eiler_94_results_in_this_sol.csv' + +names = ['file1', 'file2'] +file_lst = [file1, file2] +color_lst = ['red', 'green'] + +fig, ax = plt.subplots() + +# TODO - here is where you plot the solution +# ax.plot(time, temp, color='black', label=True solution) + +for i, j, c in zip(names, file_lst, color_lst): + fpath = os.path.join(root, j) + + df = pd.read_csv(fpath, header=0) + + ax.plot(df.Time, df.Temperature, color=c, label='initial_sol_{}'.format(i)) + +plt.legend() +plt.show() + + + diff --git a/eiler_SA/targeted_param_gen.py b/eiler_SA/targeted_param_gen.py new file mode 100644 index 0000000..182f3f5 --- /dev/null +++ b/eiler_SA/targeted_param_gen.py @@ -0,0 +1,43 @@ +import os +from eiler_SA.forwardmodeling_sa import make_params_dict, write_params_file + +chloe_model_params = '/home/gabriel/PycharmProjects/FastGrainBoundary-DiffusionSolver/sensitivity_analysis/original_eiler/Eiler94_Amphibolite.txt' +output = os.path.split(chloe_model_params)[0] +output = os.path.join(output, 'mode_length_chloe') + +fwd_model_parameters = make_params_dict(chloe_model_params) + +print(fwd_model_parameters) +# +# # TODO - first change the parameters of the plag vs hornblende modalities +print(fwd_model_parameters['Min1-Mode']) + +# 0.3 - base case +plag_mode_key = 'Min1-Mode' +plagioclase_mode = int(input('Plag Mode: ')) +pm = plagioclase_mode/100.0 + + +# 0.6 - base case +hornb_mode_key = 
'Min2-Mode' +hornblende_mode = int(input('Hornblend Mode: ')) +hm = hornblende_mode/100.0 + +plag_length_key = 'Min1-W' +plag_length = input('Plag Length: ') + +horn_length_key = 'Min2-R' +horn_length = input('Hornblende Length: ') + +# at each step in sensitivity set the new mode value (as a string) in the parameters to the new values +fwd_model_parameters[plag_mode_key] = str(pm) +fwd_model_parameters[hornb_mode_key] = str(hm) +fwd_model_parameters[horn_length_key] = str(horn_length) +fwd_model_parameters[plag_length_key] = str(plag_length) + +print('params to write') +print(fwd_model_parameters) + +# now write to file +filename = 'eiler94_p{}_h{}_pl{}_hl{}'.format(str(plagioclase_mode), str(hornblende_mode), int(plag_length), int(horn_length)) +write_params_file(fwd_model_parameters, output, filename) \ No newline at end of file diff --git a/inversemodeldialog.py b/inversemodeldialog.py index 02370d5..3e868da 100644 --- a/inversemodeldialog.py +++ b/inversemodeldialog.py @@ -28,7 +28,8 @@ def __init__(self,mainapp,init_soln,init_sse,init_aLm): self.ax1.legend() self.canvas1 = FigureCanvasTkAgg(self.fig1,frame1) - self.canvas1.show() + #self.canvas1.show() + self.canvas1.draw() self.canvas1.get_tk_widget().pack(side='top', fill='both', expand=1) self.canvas1._tkcanvas.pack(side='top', fill='both', expand=1) frame1.grid(row=2, column=0, padx=(10,10), pady=(5,10)) @@ -47,7 +48,8 @@ def __init__(self,mainapp,init_soln,init_sse,init_aLm): self.ax2.legend() self.canvas2 = FigureCanvasTkAgg(self.fig2, frame2) - self.canvas2.show() + #self.canvas2.show() + self.canvas2.draw() self.canvas2.get_tk_widget().pack(side='top', fill='both', expand=1) self.canvas2._tkcanvas.pack(side='top', fill='both', expand=1) frame2.grid(row=2, column=1, padx=(10,10), pady=(5,10)) diff --git a/modelfunctions.py b/modelfunctions.py index 8bdc601..c8dc6e4 100644 --- a/modelfunctions.py +++ b/modelfunctions.py @@ -7,14 +7,14 @@ import matplotlib.pyplot as plt import os + # This is the main 
diffusion solver function that uses python, this results in a 3d array that has the # output at every timestep. def forwardmodel_slow(mainapp): - - #read parameters and create loading bar - loadingbar=ttk.Progressbar(mainapp.page1.graphingframe, mode='determinate', length='2i', maximum=1000) - loadingbar.grid(row=20, column=0, padx=(5,0), pady=(50,5), sticky='sw') - params=mainapp.page1.forwardparams + # read parameters and create loading bar + loadingbar = ttk.Progressbar(mainapp.page1.graphingframe, mode='determinate', length='2i', maximum=1000) + loadingbar.grid(row=20, column=0, padx=(5, 0), pady=(50, 5), sticky='sw') + params = mainapp.page1.forwardparams mainapp.update() def fluxbal(z, e, f, h, j, k, l): @@ -30,8 +30,8 @@ def fluxbal(z, e, f, h, j, k, l): X = linalg.solve(A, B) return X - # get user defined info + print(params) ttot = float(params['ModelDuration'].get()) dt = float(params['TimeStep'].get()) WRd180 = float(params['WholeRock'].get()) @@ -40,7 +40,7 @@ def fluxbal(z, e, f, h, j, k, l): nmin = int(params['NumMinerals'].get()) de = 100 - cool_file=params['CoolingFile'].get() + cool_file = params['CoolingFile'].get() if params['CoolingType'].get() == "Custom": # read data in as matrix without using pandas file = open(cool_file, 'r', encoding='ISO-8859-1') @@ -125,7 +125,6 @@ def fluxbal(z, e, f, h, j, k, l): else: SA[m] = 2 * L[m] * w[m] - # initial conditions (starting concentration profiles) for m in range(0, nmin): fracfax[m] = Afac[m] + ((Bfac[m] * 1e3) / T0) + ((Cfac[m] * 1e6) / pow(T0, 2)) @@ -223,46 +222,42 @@ def fluxbal(z, e, f, h, j, k, l): else: xresult[:, i] = 1e4 * linspace(dx[i], L[i] - dx[i], xsteps) - #print(systime.time() - start_time) - #print(array(xresult).shape) - #print(array(yresult).shape) - #print(array(timeresult).shape) - + # print(systime.time() - start_time) + # print(array(xresult).shape) + # print(array(yresult).shape) + # print(array(timeresult).shape) return xresult, yresult, timeresult # This is the main diffusion 
# solver that uses C; this results in a 2d array that has only the last time step.
def forwardmodel_fast(file_param, cool_array):
    """
    Run the C diffusion solver once and return its final-timestep output.

    Writes the cooling history to <ID>.txt, invokes ./Cmodel/RunModel, reads
    the <ID>X.txt / <ID>Y.txt files it produces, then deletes the temp files.

    :param file_param: path of the parameter file handed to the C model
    :param cool_array: 2D array-like of (time, temperature) rows
    :return: (X, Y) numpy arrays read from the C model's output files
    """
    # NOTE(review): the fixed ID means concurrent calls would clobber each
    # other's temp files; fine for the current single-threaded GUI use.
    unique_ID = 1
    cool_name = str(unique_ID) + ".txt"
    x_name = str(unique_ID) + "X.txt"
    y_name = str(unique_ID) + "Y.txt"

    df = pandas.DataFrame(cool_array)
    df.to_csv(cool_name, sep=",", header=None, index=None)

    os.system("./Cmodel/RunModel " + file_param + " " + cool_name + " " + x_name + " " + y_name)

    # now read input from C code output
    X = pandas.read_csv(x_name, sep=',', header=None).values
    Y = pandas.read_csv(y_name, sep=',', header=None).values

    # BUG FIX: cleanup previously shelled out to `rm 1.txt` etc. with the ID
    # hard-coded, which would silently break if unique_ID ever changed and
    # spawned a shell per file; delete exactly the files we created instead.
    for temp_name in (cool_name, x_name, y_name):
        os.remove(temp_name)

    return X, Y


# These are all the functions for computing the inverse solution
def calc_single_diffs(xmod, ymod, error_file):
    # compute traverse for file
    def octrav(error_file):
        replacedat = pandas.read_csv(error_file, sep=' ', header=None).values
        return replacedat[:, 0], replacedat[:, 1], replacedat[:, 2]

    xactual, yactual, uncert = octrav(error_file)

    # (lines here fall outside the visible diff hunk; ind_keep is computed in
    # context not shown)
    yactual = yactual[ind_keep]
    uncert = uncert[ind_keep]

    if len(ind_keep) > 0:
        # find interpolation for each point
        coef = interpolate.splrep(xmod, ymod)
        y_corr = interpolate.splev(xactual, coef)
divided by sigma - diff = divide(y_corr-yactual,uncert) + # return differences divided by sigma + diff = divide(y_corr - yactual, uncert) else: - diff=[] + diff = [] return list(diff) -def calc_residuals(mainapp,cool_array,reg_alpha): - +def calc_residuals(mainapp, cool_array, reg_alpha): # record all difference from each mineral within each sample - D_total=[] + D_total = [] for i in mainapp.page2.forwardmods: # if cool history has negative temps give high residuals - if any(cool_array[:,1]<100): - fakecool=array([[0,700],[1,500]]) + if any(cool_array[:, 1] < 100): + fakecool = array([[0, 700], [1, 500]]) xdata, ydata = forwardmodel_fast(mainapp.page2.forwardmodelframe.forwardmodels_var[i].get(), fakecool) - ydata = ydata+300 + ydata = ydata + 300 else: xdata, ydata = forwardmodel_fast(mainapp.page2.forwardmodelframe.forwardmodels_var[i].get(), cool_array) - for k in range(0,8): - error_file=mainapp.page2.errorfileframe.errfile_var[(i,k)].get() + for k in range(0, 8): + error_file = mainapp.page2.errorfileframe.errfile_var[(i, k)].get() if error_file: - D_total=D_total+calc_single_diffs(xdata[:,k],ydata[:,k],error_file) + D_total = D_total + calc_single_diffs(xdata[:, k], ydata[:, k], error_file) - residuals=array(D_total).reshape((-1,1))+(.33/25)*(499-min(min(cool_array[:,1]),499))**2 + residuals = array(D_total).reshape((-1, 1)) + (.33 / 25) * (499 - min(min(cool_array[:, 1]), 499)) ** 2 # compute regularization term for cooling history N = len(residuals) m = len(cool_array) - #create L matrix to be used as constraint, currently constructed to penalize 2nd derivative - L=zeros((m-2,m)) - for i in range(0,m-2): - L[i,i:i+3]=[1, -2, 1] - - #stack to make K vector - res=vstack((residuals,matmul(reg_alpha*L,cool_array[:,1]).reshape(-1,1))) - return res.reshape(N+m-2) + # create L matrix to be used as constraint, currently constructed to penalize 2nd derivative + L = zeros((m - 2, m)) + for i in range(0, m - 2): + L[i, i:i + 3] = [1, -2, 1] + # stack to make K vector 
+ res = vstack((residuals, matmul(reg_alpha * L, cool_array[:, 1]).reshape(-1, 1))) + return res.reshape(N + m - 2) -def calc_jacob(mainapp,cool_array,curr_res,reg_alpha,past_SSE,past_aLm): +def calc_jacob(mainapp, cool_array, curr_res, reg_alpha, past_SSE, past_aLm): # Calculate current sse and smoothness - m=len(cool_array) - curr_res=calc_residuals(mainapp,cool_array,reg_alpha) - curr_SSE=sum(curr_res[0:-(m-2)]**2) - curr_aLm=sum(curr_res[-(m-2):]**2) + m = len(cool_array) + curr_res = calc_residuals(mainapp, cool_array, reg_alpha) + curr_SSE = sum(curr_res[0:-(m - 2)] ** 2) + curr_aLm = sum(curr_res[-(m - 2):] ** 2) past_SSE.append(curr_SSE) past_aLm.append(curr_aLm) # Update progress figures - mainapp.page2.progwind.line11.set_data(cool_array[:,0],cool_array[:,1]) - mainapp.page2.progwind.line12.set_data(cool_array[:,0],cool_array[:,1]) - limx1, limx2 = min(cool_array[:,0]), max(cool_array[:,0]) - limy1, limy2 = min(cool_array[:,1]), max(cool_array[:,1]) + mainapp.page2.progwind.line11.set_data(cool_array[:, 0], cool_array[:, 1]) + mainapp.page2.progwind.line12.set_data(cool_array[:, 0], cool_array[:, 1]) + limx1, limx2 = min(cool_array[:, 0]), max(cool_array[:, 0]) + limy1, limy2 = min(cool_array[:, 1]), max(cool_array[:, 1]) - mainapp.page2.progwind.ax1.set_ylim([limy1-.03*limy2, 1.03*limy2]) - mainapp.page2.progwind.ax1.set_xlim([limx1-.03*limx2, 1.03*limx2]) + mainapp.page2.progwind.ax1.set_ylim([limy1 - .03 * limy2, 1.03 * limy2]) + mainapp.page2.progwind.ax1.set_xlim([limx1 - .03 * limx2, 1.03 * limx2]) # Update 30 most recent objective values into bar plot. The bar graph does not have a built in function # to update the number of entries. 
- num_iter=len(past_SSE) - if num_iter<31: + num_iter = len(past_SSE) + if num_iter < 31: for i in range(len(past_SSE)): - mainapp.page2.progwind.bar2[i].set_height(past_SSE[i]+past_aLm[i]) + mainapp.page2.progwind.bar2[i].set_height(past_SSE[i] + past_aLm[i]) mainapp.page2.progwind.bar2[i].set_height(past_SSE[i]) else: - for i in range(num_iter-30,num_iter): - k = i+(num_iter-30) - mainapp.page2.progwind.bar2[i].set_height(past_SSE[k]+past_aLm[k]) + for i in range(num_iter - 30, num_iter): + k = i + (num_iter - 30) + mainapp.page2.progwind.bar2[i].set_height(past_SSE[k] + past_aLm[k]) mainapp.page2.progwind.bar2[i].set_height(past_SSE[k]) - mainapp.page2.progwind.canvas1.draw() mainapp.page2.progwind.canvas2.draw() mainapp.update() - N=len(curr_res) - deltac=1; + N = len(curr_res) + deltac = 1; - J=zeros([N,m-2]) - for i in range(1,m-1): - mainapp.page2.progwind.calc_var.set("calculating Jacobian column "+str(i)+" of "+str(m-2)) + J = zeros([N, m - 2]) + for i in range(1, m - 1): + mainapp.page2.progwind.calc_var.set("calculating Jacobian column " + str(i) + " of " + str(m - 2)) mainapp.update() - new_cool=cool_array.copy() - new_cool[i,1]=new_cool[i,1]+deltac - deriv=(calc_residuals(mainapp,new_cool,reg_alpha)-curr_res)/deltac - J[:,i-1]=deriv.reshape(N) + new_cool = cool_array.copy() + new_cool[i, 1] = new_cool[i, 1] + deltac + deriv = (calc_residuals(mainapp, new_cool, reg_alpha) - curr_res) / deltac + J[:, i - 1] = deriv.reshape(N) return J # This function runs the inverse solver for the minerals, initial profiles given def find_inverses(mainapp): + alpha = 0.04 + alpha = 0.1 + alpha = 0.001 + # alpha = 0.07 - alpha=0.04032227 - - for i in range(0,mainapp.page2.num_initials): - if (mainapp.page2.initsolutions.initial_vars[i].get()==1): - file_loc=mainapp.page2.initsolutions.folder_var.get()+mainapp.page2.initsolutions.initial_labs[i].get() - initial_sol=pandas.read_csv(file_loc, sep=',', header=None).values - m=len(initial_sol) - + for i in range(0, 
mainapp.page2.num_initials): + if (mainapp.page2.initsolutions.initial_vars[i].get() == 1): + file_loc = mainapp.page2.initsolutions.folder_var.get() + mainapp.page2.initsolutions.initial_labs[i].get() + initial_sol = pandas.read_csv(file_loc, sep=',', header=None).values + m = len(initial_sol) # Calculate initial residuals and smoothness, and create progress plots that will be updated # each time the Jac function is called. - initial_res=calc_residuals(mainapp,initial_sol,alpha) - initial_SSE=sum(initial_res[0:-(m-2)]**2) - initial_aLm=sum(initial_res[-(m-2):]**2) - - past_sse=[initial_SSE] - past_aLm=[initial_aLm] - mainapp.page2.create_progwind(mainapp,initial_sol,initial_SSE,initial_aLm) - mainapp.page2.progwind.init_var.set("Running LM algorithm with starting solution: /"+ + initial_res = calc_residuals(mainapp, initial_sol, alpha) + initial_SSE = sum(initial_res[0:-(m - 2)] ** 2) + initial_aLm = sum(initial_res[-(m - 2):] ** 2) + + past_sse = [initial_SSE] + past_aLm = [initial_aLm] + mainapp.page2.create_progwind(mainapp, initial_sol, initial_SSE, initial_aLm) + mainapp.page2.progwind.init_var.set("Running LM algorithm with starting solution: /" + mainapp.page2.initsolutions.initial_labs[i].get()) - - - # Create residual and jacobian function that accepts single array as input (for built in optimize function) def usey_residuals(cool_y): - full_soln=initial_sol.copy() - full_soln[1:-1,1]=cool_y + full_soln = initial_sol.copy() + full_soln[1:-1, 1] = cool_y return calc_residuals(mainapp, full_soln, alpha) def usey_jacob(soln_y): - full_soln=initial_sol.copy() - full_soln[1:-1,1]=soln_y - curr_res=calc_residuals(mainapp, full_soln, alpha) + full_soln = initial_sol.copy() + full_soln[1:-1, 1] = soln_y + curr_res = calc_residuals(mainapp, full_soln, alpha) return calc_jacob(mainapp, full_soln, curr_res, alpha, past_sse, past_aLm) # Now find solution and convert it back into 2D array - initial_solny=initial_sol[1:-1,1].copy() - 
sol=optimize.least_squares(usey_residuals, initial_solny , jac=usey_jacob, method='lm', gtol=1e-9) - final_sol=initial_sol.copy() - final_sol[1:-1,1]=sol.x + initial_solny = initial_sol[1:-1, 1].copy() + sol = optimize.least_squares(usey_residuals, initial_solny, jac=usey_jacob, method='lm', gtol=1e-9) + final_sol = initial_sol.copy() + final_sol[1:-1, 1] = sol.x + + # Save final solution to Outputs folder + final_df = pandas.DataFrame(final_sol) + final_df.columns = ['Time', 'Temperature'] + save_location = 'Output/' + mainapp.page2.initsolutions.initial_labs[i].get()[ + :-4] + '_results_in_this_sol.csv' + final_df.to_csv(save_location, header=True, index=None) # Calculate objective values and SSE - final_res=calc_residuals(mainapp,final_sol,alpha) - final_SSE=sum(final_res[0:-(m-2)]**2) - final_aLm=sum(final_res[-(m-2):]**2) + final_res = calc_residuals(mainapp, final_sol, alpha) + final_SSE = sum(final_res[0:-(m - 2)] ** 2) + final_aLm = sum(final_res[-(m - 2):] ** 2) # Get magnitude of final gradient function - K=calc_jacob(mainapp,final_sol,calc_residuals(mainapp,final_sol,alpha),alpha,past_sse,past_aLm) - Kmag=linalg.norm(matmul(transpose(K),final_res)) + K = calc_jacob(mainapp, final_sol, calc_residuals(mainapp, final_sol, alpha), alpha, past_sse, past_aLm) + Kmag = linalg.norm(matmul(transpose(K), final_res)) mainapp.page2.progwind.destroy() - - - - -