MDP.R (forked from eliobartos/ReinforcementLearning)
library(tidyverse)
source("functions.R")
############################## Define MDP 1
n_s = 3 # Number of states (states are labeled as 1, 2, 3...)
n_a = 2 # Actions: 1 - Left, 2 - Right (actions are labeled as 1, 2, 3...)
P = vector("list", n_a) # Transition matrices for action 1 - Left, 2 - Right
P[[1]] = matrix(c(1, 0, 0,
                  1, 0, 0,
                  0, 1, 0), nrow = n_s, byrow = TRUE) # Transition matrix if we choose action 1 (Left)
P[[2]] = matrix(c(0, 1, 0,
                  0, 0, 1,
                  0, 0, 1), nrow = n_s, byrow = TRUE) # Transition matrix if we choose action 2 (Right)
R = vector("list", n_a) # Rewards depending on action and state
R[[1]] = matrix(c(0, 1, 2), nrow = n_s) # For action 1
R[[2]] = matrix(c(1, 2, 0), nrow = n_s) # For action 2
gamma = 0.99 # Discount factor
MDP = list(n_s = n_s, n_a = n_a, P = P, R = R, gamma = gamma) # Fully defined MDP
policy = matrix(c(1, 0,
                  1, 0,
                  1, 0), byrow = TRUE, nrow = n_s) # Probability over actions for every state; states are rows
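# --- Illustrative addition (not in the original script): the value of the policy
# --- above can be checked directly by solving the Bellman linear system
# --- V = R_pi + gamma * P_pi V, i.e. V = (I - gamma * P_pi)^{-1} R_pi.
# --- P_pi, R_pi and V_pi are hypothetical helper names used only for this sketch.
P_pi = Reduce(`+`, lapply(1:n_a, function(a) policy[, a] * P[[a]])) # Policy-averaged transition matrix
R_pi = Reduce(`+`, lapply(1:n_a, function(a) policy[, a] * R[[a]])) # Policy-averaged reward vector
V_pi = solve(diag(n_s) - gamma * P_pi, R_pi)                        # Exact policy evaluation
V_pi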
######################################
############################## Define MDP 2
n_s = 4 # Number of states (states are labeled as 1, 2, 3...)
n_a = 2 # Actions: 1 - pencil, 2 - pen (actions are labeled as 1, 2, 3...)
P = vector("list", n_a) # Transition matrices for action 1 - pencil, 2 - pen
P[[1]] = matrix(c(0, 0.8, 0.2, 0,
                  0,   0,   0, 1,
                  0,   0,   0, 1,
                  0,   0,   0, 1), nrow = n_s, byrow = TRUE) # Transition matrix if we choose action 1 (pencil)
P[[2]] = matrix(c(0, 0.2, 0.8, 0,
                  0,   0,   0, 1,
                  0, 0.1,   0, 0.9,
                  0,   0,   0, 1), nrow = n_s, byrow = TRUE) # Transition matrix if we choose action 2 (pen)
R = vector("list", n_a) # Rewards depending on action and state
R[[1]] = matrix(c(1.2, 1, 1, 0), nrow = n_s) # For action 1
R[[2]] = matrix(c(1.8, 1, 1.4, 0), nrow = n_s) # For action 2
gamma = 0.99 # Discount factor
MDP = list(n_s = n_s, n_a = n_a, P = P, R = R, gamma = gamma) # Fully defined MDP
policy = matrix(c(0, 1,
                  0, 1,
                  0, 1,
                  0, 1), byrow = TRUE, nrow = n_s) # Probability over actions for every state; states are rows
# State-to-state transition rewards (used for simulating episodes)
R_mat = matrix(c(0, 1, 2, 0,
                 0, 0, 0, 1,
                 0, 5, 0, 1,
                 0, 0, 0, 0), nrow = n_s, byrow = TRUE)
# Expected reward per state for each action, derived from R_mat
# (these reproduce the values of R defined explicitly above)
R[[1]] = diag(P[[1]] %*% t(R_mat)) # For action 1
R[[2]] = diag(P[[2]] %*% t(R_mat)) # For action 2
MDP$R_mat = R_mat
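# --- Illustrative addition (not in the original script): R_mat above is intended
# --- for simulating episodes. A minimal roll-out under `policy`, starting in
# --- state 1 and stopping in the absorbing state 4, could look like the sketch
# --- below. The function name and arguments are hypothetical.
simulate_episode = function(MDP, policy, start_state = 1, terminal_state = 4) {
  s = start_state
  total_reward = 0
  while (s != terminal_state) {
    a = sample(1:MDP$n_a, 1, prob = policy[s, ])           # Sample an action from the policy
    s_next = sample(1:MDP$n_s, 1, prob = MDP$P[[a]][s, ])  # Sample the next state
    total_reward = total_reward + MDP$R_mat[s, s_next]     # Collect the transition reward
    s = s_next
  }
  total_reward
}
# Example call: simulate_episode(MDP, policy)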
######################################
policy_iteration(MDP)
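# --- Illustrative addition: policy_iteration() is defined in functions.R, which
# --- is not shown in this file. Purely as a sketch of the idea (NOT the author's
# --- implementation), a minimal policy iteration consistent with the MDP list
# --- structure used above could look like this hypothetical function.
policy_iteration_sketch = function(MDP, max_iter = 100) {
  policy = matrix(0, nrow = MDP$n_s, ncol = MDP$n_a)
  policy[, 1] = 1                                           # Start with "always take action 1"
  for (i in 1:max_iter) {
    # Policy evaluation: solve V = R_pi + gamma * P_pi V exactly
    P_pi = Reduce(`+`, lapply(1:MDP$n_a, function(a) policy[, a] * MDP$P[[a]]))
    R_pi = Reduce(`+`, lapply(1:MDP$n_a, function(a) policy[, a] * MDP$R[[a]]))
    V = solve(diag(MDP$n_s) - MDP$gamma * P_pi, R_pi)
    # Policy improvement: act greedily with respect to the action values
    Q = sapply(1:MDP$n_a, function(a) MDP$R[[a]] + MDP$gamma * MDP$P[[a]] %*% V)
    new_policy = matrix(0, nrow = MDP$n_s, ncol = MDP$n_a)
    new_policy[cbind(1:MDP$n_s, max.col(Q, ties.method = "first"))] = 1
    if (all(new_policy == policy)) break                    # Policy is stable: stop
    policy = new_policy
  }
  list(policy = policy, V = V)
}
# Example call: policy_iteration_sketch(MDP)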