Skip to content

Commit

Permalink
updating evaluation.py for CC3
Browse files Browse the repository at this point in the history
  • Loading branch information
maxstanden committed Oct 11, 2022
1 parent 91d725c commit bfae42d
Show file tree
Hide file tree
Showing 2 changed files with 36 additions and 56 deletions.
7 changes: 1 addition & 6 deletions CybORG/Agents/Wrappers/PettingZooParallelWrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@


class PettingZooParallelWrapper(BaseWrapper):
def __init__(self, env: CybORG, max_steps: int = 100):
def __init__(self, env: CybORG):
super().__init__(env)
self._agent_ids = self.possible_agents
# assuming that the final value in the agent name indicates which drone that agent is on
Expand All @@ -28,8 +28,6 @@ def __init__(self, env: CybORG, max_steps: int = 100):
[3] + [2 for i in range(num_drones)] + [2] + [3 for i in range(num_drones)] + [101, 101] + (
num_drones - 1) * [num_drones, 101, 101, 2]) for agent_name in self.possible_agents}
self.metadata = {"render_modes": ["human", "rgb_array"], "name": "Cage_Challenge_3"}
self.max_steps = max_steps
self.step_counter = 0
self.seed = 117

self.dones = {agent: False for agent in self.possible_agents}
Expand All @@ -44,7 +42,6 @@ def reset(self,
self.dones = {agent: False for agent in self.possible_agents}
self.rewards = {agent: 0. for agent in self.possible_agents}
self.infos = {}
self.step_counter = 0
# assuming that the final value in the agent name indicates which drone that agent is on
self.agent_host_map = {agent_name: f'drone_{agent_name.split("_")[-1]}' for agent_name in self.possible_agents}
self.ip_addresses = list(self.env.get_ip_map().values())
Expand All @@ -60,9 +57,7 @@ def step(self, actions: dict) -> (dict, dict, dict, dict):
# rews = GreenAvailabilityRewardCalculator(raw_obs, ['green_agent_0','green_agent_1', 'green_agent_2' ]).calculate_reward()
obs = {agent: self.observation_change(agent, raw_obs[agent]) for agent in self.env.active_agents}
# obs = {agent: self.observation_change(agent, obs) for agent in self.possible_agents}
self.step_counter += 1
# set done to true if maximum steps are reached
dones = {agent: self.step_counter >= self.max_steps or dones[agent] for agent in self.agents}
self.dones.update(dones)
self.rewards = {agent: float(sum(rews[agent].values())) for agent in self.env.active_agents}
# send messages
Expand Down
85 changes: 35 additions & 50 deletions CybORG/Evaluation/evaluation.py
Original file line number Diff line number Diff line change
@@ -1,34 +1,24 @@
import subprocess
import inspect
import subprocess
import time
from statistics import mean, stdev

from CybORG import CybORG, CYBORG_VERSION
from CybORG.Agents import B_lineAgent, SleepAgent, RandomAgent
from CybORG.Agents.SimpleAgents.BaseAgent import BaseAgent
#from CybORG.Agents.SimpleAgents.BlueLoadAgent import BlueLoadAgent
from CybORG.Agents.SimpleAgents.BlueReactAgent import BlueReactRemoveAgent
from CybORG.Agents.SimpleAgents.Meander import RedMeanderAgent
from CybORG.Agents.Wrappers.EnumActionWrapper import EnumActionWrapper
from CybORG.Agents.Wrappers.FixedFlatWrapper import FixedFlatWrapper
from CybORG.Agents.Wrappers.OpenAIGymWrapper import OpenAIGymWrapper
from CybORG.Agents import RandomAgent
from CybORG.Agents.Wrappers.PettingZooParallelWrapper import PettingZooParallelWrapper

from CybORG.Agents.Wrappers import ChallengeWrapper

from CybORG.Simulator.Scenarios import FileReaderScenarioGenerator, DroneSwarmScenarioGenerator

from CybORG.Simulator.Scenarios import DroneSwarmScenarioGenerator

MAX_EPS = 100
config = {'num_drones': 20,
'max_length_data_links': 20}


def wrap(env):  # NOTE: inspect.getsource(wrap) is parsed later (split('\n')[1].split('return ')[1]); body must stay a one-line return
    return PettingZooParallelWrapper(env=env)


def get_git_revision_hash() -> str:
    """Return the current git HEAD commit hash of this repository as a string."""
    raw = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
    return raw.decode('ascii').strip()


if __name__ == "__main__":
cyborg_version = CYBORG_VERSION
scenario = 'Scenario3'
Expand All @@ -43,9 +33,7 @@ def get_git_revision_hash() -> str:
lines = inspect.getsource(wrap)
wrap_line = lines.split('\n')[1].split('return ')[1]

path = str(inspect.getfile(CybORG))
path = path[:-7] + f'/Simulator/Scenarios/scenario_files/Scenario2.yaml'
sg = DroneSwarmScenarioGenerator(**config)
sg = DroneSwarmScenarioGenerator()
cyborg = CybORG(sg, 'sim')
wrapped_cyborg = wrap(cyborg)

Expand All @@ -54,42 +42,39 @@ def get_git_revision_hash() -> str:

print(f'Using agents {agents}, if this is incorrect please update the code to load in your agent')

file_name = str(inspect.getfile(CybORG))[:-10] + '/Evaluation/' + time.strftime("%Y%m%d_%H%M%S") + '.txt'
file_name = str(inspect.getfile(CybORG))[:-7] + '/Evaluation/' + time.strftime("%Y%m%d_%H%M%S") + '.txt'
print(f'Saving evaluation results to {file_name}')
with open(file_name, 'a+') as data:
data.write(f'CybORG v{cyborg_version}, {scenario}\n')
data.write(f'author: {name}, team: {team}, technique: {name_of_agent}\n')
data.write(f"wrappers: {wrap_line}\n")
data.write(f"agent assignment: {agents}")


print(f'using CybORG v{cyborg_version}, {scenario}\n')
for num_steps in [30, 50, 100]:
for red_agent in [B_lineAgent, RedMeanderAgent, SleepAgent]:

cyborg = CybORG(sg, 'sim')
wrapped_cyborg = wrap(cyborg)


total_reward = []
actions_log = []
for i in range(MAX_EPS):
observations = wrapped_cyborg.reset()
action_spaces = wrapped_cyborg.action_spaces
r = []
a = []
# cyborg.env.env.tracker.render()
for j in range(num_steps):
actions = {agent_name: agents[agent_name].get_action(observations[agent_name], action_spaces[agent_name]) for agent_name in wrapped_cyborg.agents}
observations, rew, done, info = wrapped_cyborg.step(actions)
r.append(mean(rew.values()))
a.append({agent_name: str(cyborg.get_last_action(agent_name)) for agent_name in wrapped_cyborg.env.agents})
if all(done.values()):
break
total_reward.append(sum(r))
actions_log.append(a)
print(f'Average reward for red agent {red_agent.__name__} and steps {num_steps} is: {mean(total_reward)} with a standard deviation of {stdev(total_reward)}')
with open(file_name, 'a+') as data:
data.write(f'steps: {num_steps}, adversary: {red_agent.__name__}, mean: {mean(total_reward)}, standard deviation {stdev(total_reward)}\n')
for act, sum_rew in zip(actions_log, total_reward):
data.write(f'actions: {act}, total reward: {sum_rew}\n')

cyborg = CybORG(sg, 'sim')
wrapped_cyborg = wrap(cyborg)


total_reward = []
actions_log = []
for i in range(MAX_EPS):
observations = wrapped_cyborg.reset()
action_spaces = wrapped_cyborg.action_spaces
r = []
a = []
# cyborg.env.env.tracker.render()
for j in range(500):
actions = {agent_name: agents[agent_name].get_action(observations[agent_name], action_spaces[agent_name]) for agent_name in wrapped_cyborg.agents}
observations, rew, done, info = wrapped_cyborg.step(actions)
if all(done.values()):
break
r.append(mean(rew.values()))
a.append({agent_name: str(cyborg.get_last_action(agent_name)) for agent_name in wrapped_cyborg.agents})
total_reward.append(sum(r))
actions_log.append(a)
print(f'Average reward is: {mean(total_reward)} with a standard deviation of {stdev(total_reward)}')
with open(file_name, 'a+') as data:
data.write(f'mean: {mean(total_reward)}, standard deviation {stdev(total_reward)}\n')
for act, sum_rew in zip(actions_log, total_reward):
data.write(f'actions: {act}, total reward: {sum_rew}\n')

0 comments on commit bfae42d

Please sign in to comment.