Reverse refactor #21

Merged · 2 commits · Mar 8, 2024
endure.py (18 changes: 6 additions & 12 deletions)
@@ -12,8 +12,8 @@
 
 
 class EndureDriver:
-    def __init__(self, config):
-        self.config = config
+    def __init__(self, conf):
+        self.config = conf
 
        logging.basicConfig(
            format=config["log"]["format"], datefmt=config["log"]["datefmt"]
@@ -33,20 +33,14 @@ def run(self):
             "LTuneTrain": LTuneTrainJob,
             "BayesianBaseline": BayesianPipeline,
         }
 
         jobs_list = self.config["job"]["to_run"]
         for job_name in jobs_list:
-            job = jobs.get(job_name)
+            job = jobs.get(job_name, None)
             if job is None:
-                driver.log.warn(f"No job associated with {job_name}")
+                self.log.warn(f"No job associated with {job_name}")
                 continue
 
-            conf_path = os.path.join("jobs", "config", f"{job_name}.toml")
-            with open(conf_path) as jobfid:
-                job_config = toml.load(jobfid)
-
-            job_instance = job(job_config)
-            job_instance.run()
+            job = job(config)
+            job.run()
 
         self.log.info("All jobs finished, exiting")
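Taken together, the merged endure.py hunks restore a driver that takes a single config dict and hands that same dict to every job it dispatches. Below is a minimal runnable sketch of that flow, not the repository's code: only the two job names visible in the hunk are mapped (to a stub class), self.config is used where the hunk shows a bare config, warning replaces the deprecated warn alias, and a log level is set so the messages are visible.

```python
import logging


class StubJob:
    """Stand-in for LTuneTrainJob / BayesianPipeline; illustration only."""

    def __init__(self, config):
        self.config = config

    def run(self):
        print("running with jobs:", self.config["job"]["to_run"])


class EndureDriver:
    def __init__(self, conf):
        self.config = conf
        logging.basicConfig(
            format=conf["log"]["format"],
            datefmt=conf["log"]["datefmt"],
            level=logging.INFO,  # level added for this demo only
        )
        self.log = logging.getLogger(conf["log"]["name"])

    def run(self):
        jobs = {"LTuneTrain": StubJob, "BayesianBaseline": StubJob}
        for job_name in self.config["job"]["to_run"]:
            job = jobs.get(job_name, None)
            if job is None:
                self.log.warning(f"No job associated with {job_name}")
                continue
            job = job(self.config)  # every job now shares the driver's config
            job.run()
        self.log.info("All jobs finished, exiting")


if __name__ == "__main__":
    conf = {
        "log": {"name": "endure", "format": "%(levelname)s %(message)s", "datefmt": "%H:%M:%S"},
        "job": {"to_run": ["BayesianBaseline", "Unknown"]},
    }
    EndureDriver(conf).run()
```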
endure.toml (10 changes: 4 additions & 6 deletions)
@@ -175,8 +175,12 @@ acquisition_function = "ExpectedImprovement"
 beta_value = 0.3
 # model_type can take values - "Classic", "QFixed", "YZHybrid", "KHybrid"
 model_type = "KHybrid"
+# determines how many workloads do we want to test using the bayesian pipeline
+multi_jobs_number = 100
+multi_job_file = "design_comparison.csv"
+
 [job.BayesianOptimization.database]
 data_dir = "databases"
 # This will take value 0 and 1 where 1 means write each cost and run details into the MySqlLite database
 # and 0 means run details are not stored in the database
 write_to_db = 1
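The new keys appear to control how many workloads the Bayesian pipeline tests and which CSV the design comparison is written to. A hedged sketch of reading them with the toml package follows, assuming they live under a [job.BayesianOptimization] table (inferred from the .database sub-table above; the actual table name may differ).

```python
import toml

# Assumes endure.toml is in the working directory and that the new keys live
# under [job.BayesianOptimization]; adjust the path/table name if they differ.
with open("endure.toml") as f:
    conf = toml.load(f)

bayes = conf["job"]["BayesianOptimization"]
num_workloads = bayes["multi_jobs_number"]            # e.g. 100
results_csv = bayes["multi_job_file"]                 # e.g. "design_comparison.csv"
write_to_db = bool(bayes["database"]["write_to_db"])  # 1 -> store run details in the database
print(num_workloads, results_csv, write_to_db)
```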
@@ -199,12 +203,6 @@ z1 = 0.190
 q = 0.545
 w = 0.202
 
-[job.BayesianOptimization.bounds]
-h_min = 1.0
-h_max = 10.0
-T_min = 2.0
-T_max = 31.0
-
 # =============================================================================
 # HEADER LSM
 # Generic LSM settings including maximum bounds, system settings, starting
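With the [job.BayesianOptimization.bounds] table removed, the h/T ranges presumably now come from the LSMBounds object constructed in jobs/bayesian_pipeline.py below. The dataclass here only sketches that idea: the field names and the max_considered_levels default are hypothetical, and the range values simply mirror the bounds deleted above.

```python
from dataclasses import dataclass


@dataclass
class LSMBounds:
    # Hypothetical field names; the values mirror the bounds removed from endure.toml.
    h_min: float = 1.0    # bits-per-element lower bound
    h_max: float = 10.0   # bits-per-element upper bound
    T_min: float = 2.0    # size-ratio lower bound
    T_max: float = 31.0   # size-ratio upper bound
    max_considered_levels: int = 20  # assumed default; only the attribute name appears in the diff


bounds = LSMBounds()
print(bounds.h_min, bounds.T_max, bounds.max_considered_levels)
```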
jobs/bayesian_pipeline.py (6 changes: 3 additions & 3 deletions)
@@ -51,7 +51,7 @@ def __init__(self, conf: dict) -> None:
         self.bounds = LSMBounds()
         self.max_levels = self.bounds.max_considered_levels
         self.cf: EndureCost = EndureCost(self.max_levels)
-        # self.log: logging.Logger = logging.getLogger(self.config["log"]["name"])
+        self.log: logging.Logger = logging.getLogger(self.config["log"]["name"])
 
         self.system: System = System(**self.bayesian_setting["system"])
         self.workload: Workload = Workload(**self.bayesian_setting["workload"])
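The two context lines at the end of this hunk splat config sub-tables directly into the System and Workload constructors. Below is a small sketch of that pattern with a stand-in Workload dataclass; the field names z1, q, and w come from the endure.toml hunk above, and z0 = 0.063 is an assumed value chosen so the four weights sum to 1.

```python
from dataclasses import dataclass


@dataclass
class Workload:
    # Stand-in for the repo's Workload; z1, q, and w match endure.toml above,
    # z0 = 0.063 is assumed so the four weights sum to 1.
    z0: float = 0.063
    z1: float = 0.190
    q: float = 0.545
    w: float = 0.202


bayesian_setting = {"workload": {"z0": 0.063, "z1": 0.190, "q": 0.545, "w": 0.202}}
workload = Workload(**bayesian_setting["workload"])  # mirrors Workload(**...) in the hunk
print(workload)
```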
@@ -129,8 +129,8 @@ def optimization_loop(self, train_x, train_y, best_y):
             new_designs, costs = self.evaluate_new_candidates(new_candidates)
             train_x, train_y, best_y, best_designs = self.update_training_data(train_x, train_y, new_candidates, costs,
                                                                                best_designs)
-            # self.log.debug(f"Iteration {i + 1}/{self.num_iterations} complete")
-            # self.log.debug("Bayesian Optimization completed")
+            self.log.debug(f"Iteration {i + 1}/{self.num_iterations} complete")
+            self.log.debug("Bayesian Optimization completed")
         return best_designs
 
     def _initialize_feature_list(self, bounds):
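For orientation, the re-enabled debug calls sit inside an iterative loop whose overall shape the hunk only hints at. The toy skeleton below sketches that structure; its helper methods are stand-ins rather than the repository's implementations, and the completion message is logged after the loop even though the diff shows the two calls adjacent.

```python
import logging
from typing import List, Tuple

logging.basicConfig(level=logging.DEBUG)


class LoopSketch:
    """Toy skeleton of the iterative structure implied by the hunk above."""

    def __init__(self, num_iterations: int = 3):
        self.num_iterations = num_iterations
        self.log = logging.getLogger("bayesian-sketch")

    # The three helpers below are stand-ins for the repo's real methods.
    def propose_candidates(self, train_x, train_y, best_y) -> List[int]:
        return [len(train_x)]

    def evaluate_new_candidates(self, cands) -> Tuple[List[int], List[float]]:
        return cands, [float(c) for c in cands]

    def update_training_data(self, train_x, train_y, cands, costs, best_designs):
        train_x += cands
        train_y += costs
        best_y = min(train_y)
        best_designs = [x for x, y in zip(train_x, train_y) if y == best_y]
        return train_x, train_y, best_y, best_designs

    def optimization_loop(self, train_x, train_y, best_y):
        best_designs: List[int] = []
        for i in range(self.num_iterations):
            new_candidates = self.propose_candidates(train_x, train_y, best_y)
            new_designs, costs = self.evaluate_new_candidates(new_candidates)
            train_x, train_y, best_y, best_designs = self.update_training_data(
                train_x, train_y, new_candidates, costs, best_designs
            )
            self.log.debug(f"Iteration {i + 1}/{self.num_iterations} complete")
        self.log.debug("Bayesian Optimization completed")
        return best_designs


print(LoopSketch().optimization_loop([0], [0.0], 0.0))
```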
jobs/config/BayesianBaseline.toml (54 changes: 0 additions & 54 deletions)

This file was deleted.
