diff --git a/docs/CFU_Installation_Steps.md b/docs/CFU_Installation_Steps.md deleted file mode 100644 index 8c9907f7..00000000 --- a/docs/CFU_Installation_Steps.md +++ /dev/null @@ -1,23 +0,0 @@ -# Arch gym - CFU Playground installation - -An installation script has been provided to install all dependencies in one go. It's recommended to run this as the superuser, as it asks for your password during execution: - -For example, use ```sudo bash ./install_sim.sh cfu``` if you want to use bash. You can also set the current terminal to superuser using ```sudo su```, and then run the script normally as ```./install_sim.sh cfu``` - -If you want to manually install cfu, follow these steps: - -- In the oss-arch-gym directory, run -```sh -git submodule update --init sims/CFU-Playground/CFU-Playground -``` - -- Move into the CFU Playground directory: -```sh -cd sims/CFU-Playground/CFU-Playground -``` - -- Run the following from this location: -```sh -./scripts/setup -./scripts/setup_vexriscv_build.sh -make install-sf -``` -Now you should be able to use the CFU-env gym environment. \ No newline at end of file diff --git a/docs/Vizier_installation.md b/docs/Vizier_installation.md deleted file mode 100644 index d214975e..00000000 --- a/docs/Vizier_installation.md +++ /dev/null @@ -1,10 +0,0 @@ -# Arch gym - Vizier installation guide - -An installation script has been provided to install vizier and its dependencies easily. To properly install vizier into arch-gym, ensure that you have installed the arch-gym conda environment. - -Open a shell in the oss-arch-gym directory, and run the following commands: - -- ```conda activate arch-gym``` -- ```./install_sim.sh viz``` - -This will install vizier and its dependencies into the arch-gym environment. 
"""Random-walker agent for the CFU-Playground gym environment.

Samples random actions for --num_steps steps, optionally recording each
trajectory with envlogger.

NOTE(review): reconstructed from a collapsed diff; statement indentation in
`main` (the wrap/reset placement) was inferred — confirm against history.
"""
from absl import flags, app
import os
import envlogger
import sys

sys.path.append('../../arch_gym/envs')
import CFUPlayground_wrapper

flags.DEFINE_string('workload', 'micro_speech', 'workload the processor is being optimized for')
flags.DEFINE_integer('num_steps', 1, 'Number of training steps.')
flags.DEFINE_bool('use_envlogger', True, 'Use envlogger to log the data.')
flags.DEFINE_string('traject_dir', 'random_walker_trajectories', 'Directory to save the dataset.')
flags.DEFINE_string('summary_dir', ".", 'Directory to save the dataset.')
flags.DEFINE_string('reward_formulation', 'both', 'The kind of reward we are optimizing for')

FLAGS = flags.FLAGS

# NOTE(review): removed the original module-level `envdm = catch_env.Catch()`
# (and its `from envlogger.testing import catch_env` import) — the object was
# constructed but never used anywhere in this file.


def wrap_in_envlogger(env, envlogger_dir):
    """Wrap `env` in an envlogger.EnvLogger when --use_envlogger is set.

    Args:
        env: gym environment to (optionally) wrap.
        envlogger_dir: directory where trajectory files are written.

    Returns:
        The EnvLogger-wrapped environment, or `env` unchanged when logging
        is disabled.
    """
    # Guard clause replaces the original if/else in which both branches
    # returned `env`.
    if not FLAGS.use_envlogger:
        return env
    metadata = {
        'agent_type': 'RandomWalker',
        'num_steps': FLAGS.num_steps,
        'env_type': type(env).__name__,
    }
    return envlogger.EnvLogger(env,
                               data_directory=envlogger_dir,
                               max_episodes_per_file=1000,
                               metadata=metadata)


def main(_):
    """Run the random walker for --num_steps steps and log trajectories."""
    env = CFUPlayground_wrapper.make_cfuplaygroundEnv(
        target_vals=[1000, 1000],
        rl_form='random_walker',
        reward_type=FLAGS.reward_formulation,
        max_steps=FLAGS.num_steps,
        workload=FLAGS.workload)

    # Experiment name. The original used "_reward_type+" — the '+' was a typo
    # for '_' (every other separator in the name is '_').
    exp_name = FLAGS.workload + "_num_steps_" + str(FLAGS.num_steps) + "_reward_type_" + FLAGS.reward_formulation
    # Append logs to the base path.
    log_path = os.path.join(FLAGS.summary_dir, 'random_walker_logs', FLAGS.reward_formulation, exp_name)
    # Trajectory directory under the summary dir.
    traject_dir = os.path.join(FLAGS.summary_dir, FLAGS.traject_dir, FLAGS.reward_formulation, exp_name)

    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.makedirs(...)` pattern.
    os.makedirs(log_path, exist_ok=True)
    if FLAGS.use_envlogger:
        os.makedirs(traject_dir, exist_ok=True)

    env = wrap_in_envlogger(env, traject_dir)
    env.reset()

    for step in range(FLAGS.num_steps):
        print("________________________________________________________\n",
              "________________________________________________________\n",
              "________________________________________________________\n",
              "________________________________________________________\n")
        print("ITERATION NUMBER: ", step + 1, " OUT OF: ", FLAGS.num_steps)
        print("________________________________________________________\n",
              "________________________________________________________\n",
              "________________________________________________________\n",
              "________________________________________________________\n")
        # Random walk: sample an action uniformly from the action space.
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)

    env.close()


if __name__ == '__main__':
    app.run(main)