# valueIterationAgents.py
# -----------------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and Pieter
# Abbeel in Spring 2013.
# For more info, see http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
import mdp, util

from learningAgents import ValueEstimationAgent


class ValueIterationAgent(ValueEstimationAgent):
    """
        * Please read learningAgents.py before reading this. *

        A ValueIterationAgent takes a Markov decision process
        (see mdp.py) on initialization and runs value iteration
        for a given number of iterations using the supplied
        discount factor.
    """
    def __init__(self, mdp, discount = 0.9, iterations = 100):
        """
          Your value iteration agent should take an mdp on
          construction, run the indicated number of iterations
          and then act according to the resulting policy.

          Some useful mdp methods you will use:
              mdp.getStates()
              mdp.getPossibleActions(state)
              mdp.getTransitionStatesAndProbs(state, action)
              mdp.getReward(state, action, nextState)
              mdp.isTerminal(state)
        """
        self.mdp = mdp
        self.discount = discount
        self.iterations = iterations
        self.values = util.Counter()  # A Counter is a dict with default 0
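        # The loop below performs batch value iteration: each sweep builds
        # V_{k+1} from the previous sweep's values V_k via the Bellman
        # optimality backup
        #     V_{k+1}(s) = max_a sum_{s'} T(s, a, s') * [R(s, a, s') + discount * V_k(s')]
        # Writing the new values into a fresh Counter (instead of updating
        # self.values in place) is what makes every update in a sweep read
        # only the old values V_k.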
        # Write value iteration code here
        "*** YOUR CODE HERE ***"
        for _ in range(self.iterations):
            tmpValues = util.Counter()
            for state in self.mdp.getStates():
                if self.mdp.isTerminal(state):
                    tmpValues[state] = 0
                else:
                    maxvalue = float("-inf")
                    for action in self.mdp.getPossibleActions(state):
                        total = 0
                        for nextState, prob in self.mdp.getTransitionStatesAndProbs(state, action):
                            total += prob * (self.mdp.getReward(state, action, nextState) + self.discount * self.values[nextState])
                        maxvalue = max(total, maxvalue)
                    tmpValues[state] = maxvalue
            self.values = tmpValues

    def getValue(self, state):
        """
          Return the value of the state (computed in __init__).
        """
        return self.values[state]

    def computeQValueFromValues(self, state, action):
        """
          Compute the Q-value of action in state from the
          value function stored in self.values.
        """
        "*** YOUR CODE HERE ***"
        total = 0
        for nextState, prob in self.mdp.getTransitionStatesAndProbs(state, action):
            total += prob * (self.mdp.getReward(state, action, nextState) + self.discount * self.values[nextState])
        return total

    def computeActionFromValues(self, state):
        """
          The policy is the best action in the given state
          according to the values currently stored in self.values.

          You may break ties any way you see fit.  Note that if
          there are no legal actions, which is the case at the
          terminal state, you should return None.
        """
        "*** YOUR CODE HERE ***"
        if self.mdp.isTerminal(state):
            return None
        value, policy = float("-inf"), None
        for action in self.mdp.getPossibleActions(state):
            tmp = self.computeQValueFromValues(state, action)
            if tmp >= value:
                value = tmp
                policy = action
        return policy

    def getPolicy(self, state):
        return self.computeActionFromValues(state)

    def getAction(self, state):
        "Returns the policy at the state (no exploration)."
        return self.computeActionFromValues(state)

    def getQValue(self, state, action):
        return self.computeQValueFromValues(state, action)
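
# In the Berkeley Pacman/Gridworld project this agent is normally exercised
# through the gridworld driver rather than imported directly; a typical
# (assumed) invocation from the project handout is:
#     python gridworld.py -a value -i 100 -k 10
# where -i sets the number of value-iteration sweeps run in __init__ and
# -k the number of episodes executed with the resulting policy.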