# valueIterationAgents.py
# -----------------------
# Licensing Information:  You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).


import mdp, util

from learningAgents import ValueEstimationAgent


class ValueIterationAgent(ValueEstimationAgent):
    """
    * Please read learningAgents.py before reading this.*

    A ValueIterationAgent takes a Markov decision process
    (see mdp.py) on initialization and runs value iteration
    for a given number of iterations using the supplied
    discount factor.
    """

    def __init__(self, mdp, discount=0.9, iterations=100):
        """
        Your value iteration agent should take an mdp on
        construction, run the indicated number of iterations
        and then act according to the resulting policy.

        Some useful mdp methods you will use:
            mdp.getStates()
            mdp.getPossibleActions(state)
            mdp.getTransitionStatesAndProbs(state, action)
            mdp.getReward(state, action, nextState)
            mdp.isTerminal(state)
        """
        self.mdp = mdp
        self.discount = discount
        self.iterations = iterations
        self.values = util.Counter()  # A Counter is a dict with default 0

        # Write value iteration code here
        "*** YOUR CODE HERE ***"
        # Run value iteration here so that self.values holds the state values
        # from which the agent's greedy policy is derived.
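        # Each sweep applies the Bellman optimality update to every state,
        # always reading successor values from the previous sweep ("batch"
        # value iteration):
        #
        #     V_{k+1}(s) = max_a  sum_{s'} T(s, a, s') * [ R(s, a, s') + discount * V_k(s') ]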

        # Run the requested number of sweeps.
        for _ in range(self.iterations):
            # newValues collects this sweep's results; Q-values are always
            # computed from self.values, i.e. the previous sweep's values.
            newValues = self.values.copy()

            for state in self.mdp.getStates():
                # Terminal states have no actions and keep their value of 0.
                if self.mdp.isTerminal(state):
                    continue

                # Q-value of every legal action in this state.
                qValues = [self.computeQValueFromValues(state, action)
                           for action in self.mdp.getPossibleActions(state)]

                if qValues:
                    # Bellman optimality update: the new value is the best Q-value.
                    newValues[state] = max(qValues)

            # Commit this sweep's values before starting the next one.
            self.values = newValues

    def getValue(self, state):
        """
        Return the value of the state (computed in __init__).
        """
        return self.values[state]

    def computeQValueFromValues(self, state, action):
        """
        Compute the Q-value of action in state from the
        value function stored in self.values.
        """
        "*** YOUR CODE HERE ***"
        # Expected return of taking `action` in `state`, evaluated with the
        # current value estimates.
        qValue = 0.0

        # Sum over every possible successor state, weighted by its transition probability.
        for nextState, probability in self.mdp.getTransitionStatesAndProbs(state, action):
            # Immediate reward plus the discounted value of the successor state.
            qValue += probability * (
                self.mdp.getReward(state, action, nextState)
                + self.discount * self.values[nextState]
            )

        return qValue

    def computeActionFromValues(self, state):
        """
        The policy is the best action in the given state
        according to the values currently stored in self.values.

        You may break ties any way you see fit.  Note that if
        there are no legal actions, which is the case at the
        terminal state, you should return None.
        """
        "*** YOUR CODE HERE ***"
        # Terminal states have no legal actions, so there is no best action.
        if self.mdp.isTerminal(state):
            return None

        # Q-value of every legal action in this state.
        qValues = util.Counter()
        for action in self.mdp.getPossibleActions(state):
            qValues[action] = self.computeQValueFromValues(state, action)

        # argMax breaks ties arbitrarily, which the docstring above allows.
        return qValues.argMax()

    def getPolicy(self, state):
        "Returns the best action at the state according to the computed values."
        return self.computeActionFromValues(state)

    def getAction(self, state):
        "Returns the policy at the state (no exploration)."
        return self.computeActionFromValues(state)

    def getQValue(self, state, action):
        "Returns the Q-value of the (state, action) pair under the computed values."
        return self.computeQValueFromValues(state, action)
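

# ---------------------------------------------------------------------------
# Usage sketch (not part of the assignment): a minimal, hypothetical MDP that
# duck-types the interface listed in the __init__ docstring above
# (getStates, getPossibleActions, getTransitionStatesAndProbs, getReward,
# isTerminal), used only to exercise ValueIterationAgent. The class name,
# state names, and reward numbers below are illustrative assumptions, not
# part of the Berkeley project code.
# ---------------------------------------------------------------------------
if __name__ == '__main__':

    class _ChainMDP:
        "Hypothetical 3-state chain: 'start' -> 'middle' -> 'end' (terminal)."

        def getStates(self):
            return ['start', 'middle', 'end']

        def getPossibleActions(self, state):
            # The terminal state has no legal actions.
            return [] if state == 'end' else ['forward']

        def getTransitionStatesAndProbs(self, state, action):
            # Deterministic transitions along the chain.
            return [({'start': 'middle', 'middle': 'end'}[state], 1.0)]

        def getReward(self, state, action, nextState):
            # Reward of 10 for reaching the terminal state, 0 otherwise.
            return 10.0 if nextState == 'end' else 0.0

        def isTerminal(self, state):
            return state == 'end'

    agent = ValueIterationAgent(_ChainMDP(), discount=0.9, iterations=100)
    # Expected result: V(start) = 9.0 (0.9 * 10), V(middle) = 10.0, V(end) = 0.0,
    # with action 'forward' everywhere except the terminal state (None).
    for s in ['start', 'middle', 'end']:
        print("%s: value %.2f, action %s" % (s, agent.getValue(s), agent.getPolicy(s)))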