main.py
import os

import gym_super_mario_bros as bros
from nes_py.wrappers import JoypadSpace
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT
from gym.wrappers import GrayScaleObservation as GSO
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.vec_env import VecFrameStack, DummyVecEnv
checkpoint_dir = "./train/"
logs_dir = "./logs/"
# Create game environment
env = bros.make('SuperMarioBros-v0')
# Simplifies the controls
env = JoypadSpace(env, SIMPLE_MOVEMENT)
# Greyscale cuts the observation size by two thirds, which speeds up training
env = GSO(env, keep_dim=True)
# Wrap in a (single-env) vectorised environment, as stable-baselines3 expects
env = DummyVecEnv([lambda: env])
# Stack consecutive frames so the policy can infer motion ("memory", so to speak)
env = VecFrameStack(env, 5, channels_order='last')
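# Optional sanity check (an addition, not part of the original script): with
# keep_dim=True and channels_order='last', the observation space should be
# 240x256 greyscale NES frames stacked 5 deep on the last axis.
print(env.observation_space.shape)  # expected: (240, 256, 5)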
# Callback that saves a model checkpoint every check_freq training steps
class TrainAndLoggingCallback(BaseCallback):
    def __init__(self, check_freq, save_path, verbose=1):
        super().__init__(verbose)
        self.check_freq = check_freq
        self.save_path = save_path

    def _init_callback(self):
        if self.save_path is not None:
            os.makedirs(self.save_path, exist_ok=True)

    def _on_step(self):
        if self.n_calls % self.check_freq == 0:
            model_path = os.path.join(self.save_path, f'best_model_{self.n_calls}')
            self.model.save(model_path)
        # Returning True keeps training going
        return True
callback = TrainAndLoggingCallback(check_freq=25000, save_path=checkpoint_dir)
# The PPO model: uncomment the first line to train from scratch, or load a
# saved checkpoint (as below) to resume or evaluate
#model = PPO('CnnPolicy', env, verbose=1, tensorboard_log=logs_dir, learning_rate=0.000001, n_steps=1024)
model = PPO.load('./train/best_model_100000.zip', env=env, tensorboard_log=logs_dir, learning_rate=0.000001, n_steps=1024, print_system_info=True)
#model.learn(total_timesteps=1000000, callback=callback)
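# A hedged sketch (not in the original run): to resume training from the
# loaded checkpoint, pass the checkpoint callback and keep the timestep
# counter continuous across loads with reset_num_timesteps=False.
#model.learn(total_timesteps=1000000, callback=callback, reset_num_timesteps=False)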
# Run the trained agent
state = env.reset()
while True:
    action, _ = model.predict(state)
    state, reward, done, info = env.step(action)
    env.render()
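# Note: the loop above renders until interrupted. A possible variant (an
# assumption, not in the original) that stops after one episode; `done` from
# a DummyVecEnv step is an array with one flag per sub-environment:
#     state = env.reset()
#     done = [False]
#     while not done[0]:
#         action, _ = model.predict(state)
#         state, reward, done, info = env.step(action)
#         env.render()
#     env.close()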
'''# Alternative demo: take random actions instead of using the model
done = True
# Loop through each frame in the game
for step in range(100000):
    if done:
        # (Re)start the game
        env.reset()
    # Take a random action
    state, reward, done, info = env.step(env.action_space.sample())
    # Show the game
    env.render()
env.close()'''