From dd9d6c71e61626a642a88909716f8bd3b9a1f87a Mon Sep 17 00:00:00 2001
From: Lindley Graham
Date: Tue, 17 May 2016 01:58:57 -0400
Subject: [PATCH] fixed all TODO LG

---
 README.md                                   | 11 +++++++++++
 bet/sampling/adaptiveSampling.py            | 10 ++++++----
 test/test_sampling/test_adaptiveSampling.py |  3 ++-
 3 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index 103f2b71..9f8d89dd 100644
--- a/README.md
+++ b/README.md
@@ -20,6 +20,17 @@ You will need to run sphinx-apidoc AND reinstall BET anytime a new module or met
 
 Useful scripts are contained in ``examples/``
 
+Tests
+-----
+
+To run tests in serial call::
+
+    nosetests tests
+
+To run tests in parallel call::
+
+    mpirun -np NPROC nosetests tests
+
 Dependencies
 ------------
 
diff --git a/bet/sampling/adaptiveSampling.py b/bet/sampling/adaptiveSampling.py
index c674c7f2..f916b48c 100644
--- a/bet/sampling/adaptiveSampling.py
+++ b/bet/sampling/adaptiveSampling.py
@@ -229,6 +229,10 @@ def generalized_chains(self, input_obj, t_set, kern,
             hot_start=0):
         """
         Basic adaptive sampling algorithm using generalized chains.
+
+        .. todo::
+
+            Test HOTSTART from parallel files using different and same num proc
 
         :param string initial_sample_type: type of initial sample random (or r),
             latin hypercube(lhs), or space-filling curve(TBD)
@@ -331,7 +335,7 @@ def generalized_chains(self, input_obj, t_set, kern,
                 # be the one with the matching processor number (doesn't
                 # really matter)
                 mdat = sio.loadmat(mdat_files[comm.rank])
-                disc = sample.load_discretization(savefile)
+                disc = sample.load_discretization(mdat_files[comm.rank])
                 kern_old = np.squeeze(mdat['kern_old'])
                 all_step_ratios = np.squeeze(mdat['step_ratios'])
             elif hot_start == 1 and len(mdat_files) != comm.size:
@@ -388,7 +392,6 @@ def generalized_chains(self, input_obj, t_set, kern,
             kern_old = np.squeeze(mdat['kern_old'])
             all_step_ratios = np.squeeze(mdat['step_ratios'])
             chain_length = disc.check_nums()/self.num_chains
-            #mdat_files = []
             # reshape if parallel
             if comm.size > 1:
                 temp_input = np.reshape(disc._input_sample_set.\
@@ -397,7 +400,6 @@ def generalized_chains(self, input_obj, t_set, kern,
                 temp_output = np.reshape(disc._output_sample_set.\
                         get_values(), (self.num_chains,
                             chain_length, -1), 'F')
-                all_step_ratios = np.reshape(all_step_ratios,
                         (self.num_chains, chain_length), 'F')
 
         # SPLIT DATA IF NECESSARY
@@ -427,7 +429,7 @@ def generalized_chains(self, input_obj, t_set, kern,
                     get_values_local()[-self.num_chains_pproc:, :])
 
             # Determine how many batches have been run
-            start_ind = disc.check_nums()/self.num_chains_pproc
+            start_ind = disc._input_sample_set.get_values_local().shape[0]/self.num_chains_pproc
 
         mdat = dict()
         self.update_mdict(mdat)
diff --git a/test/test_sampling/test_adaptiveSampling.py b/test/test_sampling/test_adaptiveSampling.py
index ad68107c..66679b18 100644
--- a/test/test_sampling/test_adaptiveSampling.py
+++ b/test/test_sampling/test_adaptiveSampling.py
@@ -154,6 +154,7 @@ def ifun(outputs):
         assert np.all(all_step_ratios <= t_set.max_ratio)
 
         # did the savefiles get created? (proper number, contain proper keys)
+        comm.barrier()
         mdat = dict()
         if comm.rank == 0:
             mdat = sio.loadmat(savefile)
@@ -229,6 +230,7 @@ def map_10t4(x):
 
     def tearDown(self):
+        comm.barrier()
        for f in self.savefiles:
             if comm.rank == 0 and os.path.exists(f+".mat"):
                 os.remove(f+".mat")
@@ -424,7 +426,6 @@ def ifun(outputs):
         assert asr > t_set.min_ratio
         assert asr < t_set.max_ratio
 
-    #TODO: LG Fix
     def test_generalized_chains(self):
         """
         Test :met:`bet.sampling.adaptiveSampling.sampler.generalized_chains`
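
The hot-start bookkeeping in the hunks above counts completed batches from the locally stored samples and reshapes gathered chain data in Fortran order. Below is a minimal standalone sketch of that arithmetic, using plain NumPy with hypothetical sizes and variable names (num_chains, num_chains_pproc, batches_done); it is not BET code, which reads these quantities from saved .mat files and discretization objects::

    # Standalone sketch with hypothetical sizes; BET itself recovers these
    # values from saved .mat files and sample.discretization objects.
    import numpy as np

    num_chains = 4            # total chains across all processors (hypothetical)
    num_chains_pproc = 2      # chains handled by one processor (hypothetical)
    batches_done = 5          # batches finished before the interrupted run (hypothetical)
    input_dim = 3

    # One row per (chain, batch) pair stored locally on this processor.
    local_values = np.random.random((num_chains_pproc * batches_done, input_dim))

    # Completed batches = number of local samples / chains per processor.
    start_ind = local_values.shape[0] // num_chains_pproc
    assert start_ind == batches_done

    # Globally gathered samples reshape back to (num_chains, chain_length, dim)
    # using Fortran ('F') ordering, matching how the chains were interleaved.
    global_values = np.random.random((num_chains * batches_done, input_dim))
    per_chain = np.reshape(global_values, (num_chains, batches_done, -1), 'F')
    print(start_ind, per_chain.shape)   # 5 (4, 5, 3)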