Skip to content

Commit

Permalink
Merge remote-tracking branch 'origin/main' into ol_integration
Browse files Browse the repository at this point in the history
  • Loading branch information
ShvetankPrakash committed Oct 21, 2023
2 parents eda65d2 + ccbefc1 commit fc0f785
Show file tree
Hide file tree
Showing 132 changed files with 14,237 additions and 686 deletions.
8 changes: 8 additions & 0 deletions .readthedocs.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
version: 2
conda:
environment: environment_old.yml
build:
os: "ubuntu-20.04"
tools:
python: "mambaforge-22.9"

729 changes: 729 additions & 0 deletions ArchGym_Intro ipynb.ipynb

Large diffs are not rendered by default.

44 changes: 22 additions & 22 deletions acme/acme/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,30 +1,30 @@
absl-py
jaxlib==0.3.5
bsuite
tensorflow_probability==0.15.0
dm-env
jax==0.3.6
optax
rlds
dm-control==0.0.364896371
atari-py
gym
pillow
gym[atari]
dm-haiku
dm-sonnet
tensorflow-estimator==2.8.0
dm-tree
dm-launchpad==0.5.2
pygame==2.1.0
typing-extensions
tensorflow-datasets==4.5.2
chex
rlax
gym
tensorflow_probability==0.15.0
tensorflow==2.8.0
tensorflow-estimator==2.8.0
dm-tree
flax
dm-haiku
bsuite
keras==2.8.0
optax
absl-py
flax
numpy==1.22.4
trfl
tensorflow-datasets==4.5.2
rlds
jaxlib==0.3.5
dm-reverb==0.7.2
chex
trfl
gym[atari]
jax==0.3.6
dm-sonnet
dm-control==0.0.364896371
pillow
rlax
atari-py
pygame==2.1.0
dm-reverb==0.7.2
2 changes: 1 addition & 1 deletion acme/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@
jax_requirements = [
'chex',
'jax==0.3.6', # Update when TF2.9 is released.
'jaxlib==0.3.5', # Update when TF2.9 is released.
'jaxlib==0.3.15', # Update when TF2.9 is released.
'dm-haiku',
'flax',
'optax',
Expand Down
4 changes: 3 additions & 1 deletion arch_gym/envs/AstraSimEnv.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,9 @@ def calculate_reward(self, observations):

# give it one action: one set of parameters from json file
def step(self, action_dict):

"""
Step function for the environment
"""
# write the three config files
# with open(self.network_config, "w") as outfile:
# outfile.write(json.dumps(action_dict['network'], indent=4))
Expand Down
7 changes: 6 additions & 1 deletion arch_gym/envs/AstraSimWrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,9 @@ def reset(self) -> dm_env.TimeStep:


def step(self, action: types.NestedArray) -> dm_env.TimeStep:
"""Steps the environment."""
"""
Steps the environment.
"""
if self._reset_next_step:
return self.reset()
if(self.env_wrapper_sel=='macme' or self.env_wrapper_sel=='macme_continuous'):
Expand Down Expand Up @@ -125,6 +127,9 @@ def __getattr__(self, name: str):
return getattr(self._environment, name)

def close(self):
"""
Closes the environment.
"""
self._environment.close()


Expand Down
3 changes: 3 additions & 0 deletions arch_gym/envs/RandomParameterEnv.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,9 @@ def __init__(self, natural=False):
self.reset()

def step(self, action):
"""
Step function for the environment
"""

self.stepN = self.stepN + 1
self.steps = self.steps + 1
Expand Down
File renamed without changes.
39 changes: 15 additions & 24 deletions arch_gym/envs/custom_env.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,21 +9,26 @@
class CustomEnv(gym.Env):
def __init__(self, max_steps=10):
super(CustomEnv, self).__init__()
self.observation_space = spaces.Dict({"energy": spaces.Box(0, 1, (1,)), "area": spaces.Box(0, 1, (1,)), "latency": spaces.Box(0, 1, (1,))})
self.observation_space = spaces.Dict({"energy": spaces.Box(0, 1, (1,)),
"area": spaces.Box(0, 1, (1,)),
"latency": spaces.Box(0, 1, (1,))})

self.action_space = spaces.Dict({"num_cores": spaces.Discrete(15), "freq": spaces.Box(low = 0.5, high = 3, dtype = float),
"mem_type": spaces.Discrete(3), "mem_size": spaces.Discrete(65)})
# mem_type_list = ['DRAM', 'SRAM', 'Hybrid']
self.action_space = spaces.Dict(
{"num_cores": spaces.Discrete(15),
"freq": spaces.Box(low = 0.5, high = 3, dtype = float),
"mem_type": spaces.Discrete(3), # mem_type is one of 'DRAM', 'SRAM', 'Hybrid'
"mem_size": spaces.Discrete(65)})


self.max_steps = max_steps
self.counter = 0
self.energy = 10
self.area = 15
self.latency = 12
self.energy = 0
self.area = 0
self.latency = 0
self.initial_state = np.array([self.energy, self.area, self.latency])
self.observation = None
self.done = False

self.ideal = np.array([4, 2.0, 1, 32]) #ideal values for action space [num_cores, freq, mem_type, mem_size]

def reset(self):
return self.initial_state
Expand All @@ -42,31 +47,17 @@ def step(self, action):
self.reset()
else:
self.counter += 1

# Compute the new state based on the action (random formulae for now)
self.energy += num_cores*1 + freq*2 + mem_size*3
self.area += num_cores*2 + freq*3 + mem_size*1
self.latency += num_cores*3 + freq*3 + mem_size*1

observation = np.array([self.energy, self.area, self.latency])
ideal_values = np.array([4, 2.0, 1, 32])
print("custom env", observation)

self.observation = observation
# print(action)
# print(ideal_values)
reward = -np.linalg.norm(action - ideal_values)
reward = -np.linalg.norm(action - self.ideal)

print("custom env rew", reward)

return observation, reward, self.done, {}

def render(self, mode='human'):
print (f'Energy: {self.energy}, Area: {self.area}, Latency: {self.latency}')
# def main(self):

# # observation = self.reset()
# # print("The initial observation is {}".format(observation))
# action = self.action_space.sample()
# print("The taken action is {}".format(action))
# obs, reward, done, info = self.step(action)
# print("The outcome is {}".format(reward))
51 changes: 0 additions & 51 deletions arch_gym/envs/custom_gym.py

This file was deleted.

3 changes: 3 additions & 0 deletions arch_gym/envs/simpleEnv.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,9 @@ def reset(self):
return self.state

def step(self, action):
"""
Step function for the environment
"""
if self.done:
print("Episode Done!")
elif (self.count == self.max_steps):
Expand Down
Binary file added docs/build/doctrees/arch_gym.doctree
Binary file not shown.
Binary file added docs/build/doctrees/arch_gym.envs.doctree
Binary file not shown.
Binary file added docs/build/doctrees/customenv.doctree
Binary file not shown.
Binary file added docs/build/doctrees/index.doctree
Binary file not shown.
Binary file added docs/build/doctrees/modules_arch_gym.doctree
Binary file not shown.
Binary file added docs/build/doctrees/modules_sims.doctree
Binary file not shown.
Binary file added docs/build/doctrees/sims.AstraSim.doctree
Binary file not shown.
Binary file added docs/build/doctrees/sims.customenv.doctree
Binary file not shown.
Binary file added docs/build/doctrees/sims.doctree
Binary file not shown.
4 changes: 4 additions & 0 deletions docs/build/html/.buildinfo
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: 2204b31f9468ba8dbee3f93a98f6fbf5
tags: 645f666f9bcd5a90fca523b33c5a78b7
Loading

0 comments on commit fc0f785

Please sign in to comment.