# atari_agent.py (forked from PaddlePaddle/PARL)
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
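"""Agent for the Atari IMPALA example in PARL.

AtariAgent wraps an actor-critic algorithm that exposes sample/predict/learn
(e.g. parl.algorithms.IMPALA) and builds three fluid programs: one that
samples stochastic actions together with their behaviour logits, one that
predicts actions for evaluation, and one that runs learning steps on batches
streamed in through a py_reader.
"""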

import numpy as np
import paddle.fluid as fluid
import parl
from parl import layers
from parl.utils import machine_info


class AtariAgent(parl.Agent):
    def __init__(self, algorithm, obs_shape, act_dim,
                 learn_data_provider=None):
        assert isinstance(obs_shape, (list, tuple))
        assert isinstance(act_dim, int)
        self.obs_shape = obs_shape
        self.act_dim = act_dim
        # parl.Agent.__init__ calls build_program(), which creates the three
        # fluid programs and self.learn_reader used below.
        super(AtariAgent, self).__init__(algorithm)

        if learn_data_provider:
            # Stream learner batches from a generator function; each batch
            # must follow the feed_list order declared in build_program.
            self.learn_reader.decorate_tensor_provider(learn_data_provider)
            self.learn_reader.start()
    def build_program(self):
        self.sample_program = fluid.Program()
        self.predict_program = fluid.Program()
        self.learn_program = fluid.Program()

        # Actor side: sample stochastic actions and keep the behaviour
        # logits needed later for the off-policy (V-trace) correction.
        with fluid.program_guard(self.sample_program):
            obs = layers.data(
                name='obs', shape=self.obs_shape, dtype='float32')
            self.sample_actions, self.behaviour_logits = self.alg.sample(obs)

        # Evaluation side: predict actions only.
        with fluid.program_guard(self.predict_program):
            obs = layers.data(
                name='obs', shape=self.obs_shape, dtype='float32')
            self.predict_actions = self.alg.predict(obs)

        # Learner side: inputs are pulled from a py_reader instead of feed
        # dicts, so data loading can overlap with training.
        with fluid.program_guard(self.learn_program):
            obs = layers.data(
                name='obs', shape=self.obs_shape, dtype='float32')
            actions = layers.data(name='actions', shape=[], dtype='int64')
            behaviour_logits = layers.data(
                name='behaviour_logits', shape=[self.act_dim], dtype='float32')
            rewards = layers.data(name='rewards', shape=[], dtype='float32')
            dones = layers.data(name='dones', shape=[], dtype='float32')
            lr = layers.data(
                name='lr', shape=[1], dtype='float32', append_batch_size=False)
            entropy_coeff = layers.data(
                name='entropy_coeff',
                shape=[1],
                dtype='float32',
                append_batch_size=False)

            self.learn_reader = fluid.layers.create_py_reader_by_data(
                capacity=32,
                feed_list=[
                    obs, actions, behaviour_logits, rewards, dones, lr,
                    entropy_coeff
                ])
            obs, actions, behaviour_logits, rewards, dones, lr, entropy_coeff = fluid.layers.read_file(
                self.learn_reader)

            vtrace_loss, kl = self.alg.learn(obs, actions, behaviour_logits,
                                             rewards, dones, lr, entropy_coeff)
            self.learn_outputs = [
                vtrace_loss.total_loss, vtrace_loss.pi_loss,
                vtrace_loss.vf_loss, vtrace_loss.entropy, kl
            ]
        # Compile the learn program for parallel execution, with total_loss
        # as the loss to minimize.
        self.learn_program = parl.compile(self.learn_program,
                                          vtrace_loss.total_loss)
    def sample(self, obs_np):
        """Sample stochastic actions for a batch of observations.

        Args:
            obs_np: a numpy float32 array of shape ([B] + obs_shape).
                    Image input should be in NCHW format.

        Returns:
            sample_actions: a numpy int64 array of shape [B]
            behaviour_logits: a numpy float32 array of shape [B, act_dim],
                kept so the learner can apply the off-policy correction.
        """
        obs_np = obs_np.astype('float32')

        sample_actions, behaviour_logits = self.fluid_executor.run(
            self.sample_program,
            feed={'obs': obs_np},
            fetch_list=[self.sample_actions, self.behaviour_logits])
        return sample_actions, behaviour_logits
    def predict(self, obs_np):
        """Predict actions for a batch of observations (used for evaluation).

        Args:
            obs_np: a numpy float32 array of shape ([B] + obs_shape).
                    Image input should be in NCHW format.

        Returns:
            predict_actions: a numpy int64 array of shape [B]
        """
        obs_np = obs_np.astype('float32')

        predict_actions = self.fluid_executor.run(
            self.predict_program,
            feed={'obs': obs_np},
            fetch_list=[self.predict_actions])[0]
        return predict_actions
    def learn(self):
        """Run one learning step on a batch read from self.learn_reader."""
        total_loss, pi_loss, vf_loss, entropy, kl = self.fluid_executor.run(
            self.learn_program, fetch_list=self.learn_outputs)
        return total_loss, pi_loss, vf_loss, entropy, kl
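

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the upstream PARL file).
#
# The learn_data_provider passed to AtariAgent must be a callable that
# returns a generator yielding batches in exactly the feed_list order and
# dtypes declared in build_program:
#     obs              float32  [B] + obs_shape
#     actions          int64    [B]
#     behaviour_logits float32  [B, act_dim]
#     rewards          float32  [B]
#     dones            float32  [B]
#     lr               float32  [1]
#     entropy_coeff    float32  [1]
# The provider below fills these with random placeholder values just to show
# the contract; obs_shape=(4, 84, 84), act_dim=6 and the lr/entropy_coeff
# values are illustrative assumptions, not values taken from this file.
def _example_data_provider(obs_shape=(4, 84, 84), act_dim=6, batch_size=32):
    """Yield dummy batches in the order expected by self.learn_reader."""
    while True:
        yield [
            np.random.rand(batch_size, *obs_shape).astype('float32'),
            np.random.randint(act_dim, size=(batch_size, )).astype('int64'),
            np.random.rand(batch_size, act_dim).astype('float32'),
            np.zeros((batch_size, ), dtype='float32'),
            np.zeros((batch_size, ), dtype='float32'),
            np.array([0.001], dtype='float32'),  # learning rate
            np.array([-0.01], dtype='float32'),  # entropy coefficient
        ]


# Typical wiring (hedged: the IMPALA constructor arguments vary across PARL
# versions, and AtariModel is assumed to come from a sibling atari_model.py):
#
#     from parl.algorithms import IMPALA
#     from atari_model import AtariModel
#
#     algorithm = IMPALA(AtariModel(act_dim=6), ...)  # see PARL docs
#     agent = AtariAgent(algorithm, obs_shape=[4, 84, 84], act_dim=6,
#                        learn_data_provider=_example_data_provider)
#
#     actions, logits = agent.sample(obs_np)  # actor / rollout side
#     losses = agent.learn()                  # learner side, reads batches
#                                             # from the provider via py_reader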