# Logging settings
[LOGGING]
# Enable verbose output
VERBOSE = False
# Randomization settings
[RANDOMIZATION]
# Seed
SEED = 1
# Enable domain randomization
DOMAIN_RAND = False
# Enable dynamics randomization
DYNAMICS_RAND = False
# Enable camera lens distortion
DISTORTION = False
# Enable camera parameter randomization
CAMERA_RAND = False
# Simulator Settings
[SETUP]
# Environment Name
ENV = Duckietown-reinforcement-learning-v0
# Map Name
MAP = reinforcement_learning.yaml
# Draw road tile curves
DRAW_CURVE = True
# Draw bounding box of agent
DRAW_BBOX = True
# Number of frames to skip
FRAME_SKIP = 1
# Camera modes: human, top_down, free_cam, rgb_array
CAM_MODE = top_down
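# A minimal sketch (an assumption about how these settings are consumed, not
# confirmed by this file): the [SETUP] and [RANDOMIZATION] keys roughly mirror
# keyword arguments of the gym-duckietown Simulator, e.g. in Python:
#
#   from gym_duckietown.simulator import Simulator
#   env = Simulator(
#       map_name="reinforcement_learning",  # MAP, without the .yaml extension
#       seed=1,                             # SEED
#       domain_rand=False,                  # DOMAIN_RAND
#       dynamics_rand=False,                # DYNAMICS_RAND
#       distortion=False,                   # DISTORTION
#       camera_rand=False,                  # CAMERA_RAND
#       draw_curve=True,                    # DRAW_CURVE
#       draw_bbox=True,                     # DRAW_BBOX
#       frame_skip=1,                       # FRAME_SKIP
#   )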
[AGENTS]
# Number of random agents
NUM_RANDOM_AGENTS = 1
# Maximum number of agents
MAX_AGENTS = 8
# Safety factor applied to the minimum distance used for collision detection
SAFETY_FACTOR = 1.0
[LEARNING]
# Save the models
SAVE_MODELS = True
# Save directory
MODEL_DIR = learning/reinforcement/q-learning/models/
# Test model path
TEST_MODEL_PATH = learning/reinforcement/q-learning/models/defensive/episodebatch_999
# Learning Rate
ALPHA = 0.1
# Learning Rate Decay
LEARNING_RATE_DECAY = 0.5
# Discount Factor
GAMMA = 0.8
# Epsilon for epsilon-greedy action selection
EPSILON = 0.35
# Number of episodes
NUM_EPISODES = 1000
# Number of testing iterations
NUM_ITERATIONS = 100
# Model number to test
MODEL_NUM = 0
# Reward Profile: 0 = Pathological, 1 = Reckless, 2 = Defensive
REWARD_PROFILE = 2
# Render steps during training
RENDER_STEPS = 0
# Render steps during testing
RENDER_STEPS_TEST = 10
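# A minimal usage sketch (assumed; the training code itself is not shown here):
# the values above can be read with Python's standard configparser, and ALPHA,
# GAMMA and EPSILON are the usual tabular Q-learning / epsilon-greedy
# hyperparameters:
#
#   import configparser
#   config = configparser.ConfigParser()
#   config.read("config.ini")
#   alpha = config.getfloat("LEARNING", "ALPHA")      # learning rate
#   gamma = config.getfloat("LEARNING", "GAMMA")      # discount factor
#   epsilon = config.getfloat("LEARNING", "EPSILON")  # exploration rate
#
#   # Standard tabular Q-learning update these hyperparameters feed into:
#   #   Q[s][a] += alpha * (reward + gamma * max(Q[s_next]) - Q[s][a])
#   # With probability epsilon a random action is taken, otherwise argmax Q[s].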