author    Neil Kollack <nkollack@gmail.com>  2021-09-28 14:27:45 -0500
committer Neil Kollack <nkollack@gmail.com>  2021-09-28 14:27:45 -0500
commit    dd3dda9dcbb6bff01d066925757ec944621dc704 (patch)
tree      3b30a7c4973892c78a4f215e92ac6426f9e7fc5d
parent    41406c7ff0ff96998978e520f5c1a1f3874c3bd4 (diff)
initial commit
-rw-r--r--  .vscode/launch.json  26
-rw-r--r--  VERSION  1
-rw-r--r--  autograder.py  351
-rw-r--r--  game.py  729
-rw-r--r--  ghostAgents.py  81
-rw-r--r--  grading.py  282
-rw-r--r--  graphicsDisplay.py  679
-rw-r--r--  graphicsUtils.py  398
-rw-r--r--  keyboardAgents.py  84
-rw-r--r--  layout.py  149
-rw-r--r--  layouts/capsuleClassic.lay  7
-rw-r--r--  layouts/contestClassic.lay  9
-rw-r--r--  layouts/mediumClassic.lay  11
-rw-r--r--  layouts/minimaxClassic.lay  5
-rw-r--r--  layouts/openClassic.lay  9
-rw-r--r--  layouts/originalClassic.lay  27
-rw-r--r--  layouts/smallClassic.lay  7
-rw-r--r--  layouts/testClassic.lay  10
-rw-r--r--  layouts/trappedClassic.lay  5
-rw-r--r--  layouts/trickyClassic.lay  13
-rw-r--r--  multiAgents.py  195
-rw-r--r--  multiagentTestClasses.py  529
-rw-r--r--  pacman.py  684
-rw-r--r--  pacmanAgents.py  52
-rw-r--r--  projectParams.py  18
-rw-r--r--  testClasses.py  189
-rw-r--r--  testParser.py  85
-rw-r--r--  test_cases/CONFIG  1
-rw-r--r--  test_cases/extra/CONFIG  2
-rw-r--r--  test_cases/extra/grade-agent.test  11
-rw-r--r--  test_cases/q1/CONFIG  2
-rw-r--r--  test_cases/q1/grade-agent.solution  2
-rw-r--r--  test_cases/q1/grade-agent.test  18
-rw-r--r--  test_cases/q2/0-lecture-6-tree.solution  3
-rw-r--r--  test_cases/q2/0-lecture-6-tree.test  50
-rw-r--r--  test_cases/q2/0-small-tree.solution  3
-rw-r--r--  test_cases/q2/0-small-tree.test  36
-rw-r--r--  test_cases/q2/1-1-minmax.solution  3
-rw-r--r--  test_cases/q2/1-1-minmax.test  47
-rw-r--r--  test_cases/q2/1-2-minmax.solution  3
-rw-r--r--  test_cases/q2/1-2-minmax.test  47
-rw-r--r--  test_cases/q2/1-3-minmax.solution  3
-rw-r--r--  test_cases/q2/1-3-minmax.test  47
-rw-r--r--  test_cases/q2/1-4-minmax.solution  3
-rw-r--r--  test_cases/q2/1-4-minmax.test  47
-rw-r--r--  test_cases/q2/1-5-minmax.solution  3
-rw-r--r--  test_cases/q2/1-5-minmax.test  75
-rw-r--r--  test_cases/q2/1-6-minmax.solution  3
-rw-r--r--  test_cases/q2/1-6-minmax.test  75
-rw-r--r--  test_cases/q2/1-7-minmax.solution  3
-rw-r--r--  test_cases/q2/1-7-minmax.test  75
-rw-r--r--  test_cases/q2/1-8-minmax.solution  3
-rw-r--r--  test_cases/q2/1-8-minmax.test  75
-rw-r--r--  test_cases/q2/2-1a-vary-depth.solution  3
-rw-r--r--  test_cases/q2/2-1a-vary-depth.test  52
-rw-r--r--  test_cases/q2/2-1b-vary-depth.solution  3
-rw-r--r--  test_cases/q2/2-1b-vary-depth.test  52
-rw-r--r--  test_cases/q2/2-2a-vary-depth.solution  3
-rw-r--r--  test_cases/q2/2-2a-vary-depth.test  52
-rw-r--r--  test_cases/q2/2-2b-vary-depth.solution  3
-rw-r--r--  test_cases/q2/2-2b-vary-depth.test  52
-rw-r--r--  test_cases/q2/2-3a-vary-depth.solution  3
-rw-r--r--  test_cases/q2/2-3a-vary-depth.test  52
-rw-r--r--  test_cases/q2/2-3b-vary-depth.solution  3
-rw-r--r--  test_cases/q2/2-3b-vary-depth.test  52
-rw-r--r--  test_cases/q2/2-4a-vary-depth.solution  3
-rw-r--r--  test_cases/q2/2-4a-vary-depth.test  52
-rw-r--r--  test_cases/q2/2-4b-vary-depth.solution  3
-rw-r--r--  test_cases/q2/2-4b-vary-depth.test  52
-rw-r--r--  test_cases/q2/2-one-ghost-3level.solution  3
-rw-r--r--  test_cases/q2/2-one-ghost-3level.test  52
-rw-r--r--  test_cases/q2/3-one-ghost-4level.solution  3
-rw-r--r--  test_cases/q2/3-one-ghost-4level.test  79
-rw-r--r--  test_cases/q2/4-two-ghosts-3level.solution  3
-rw-r--r--  test_cases/q2/4-two-ghosts-3level.test  52
-rw-r--r--  test_cases/q2/5-two-ghosts-4level.solution  3
-rw-r--r--  test_cases/q2/5-two-ghosts-4level.test  79
-rw-r--r--  test_cases/q2/6-tied-root.solution  3
-rw-r--r--  test_cases/q2/6-tied-root.test  31
-rw-r--r--  test_cases/q2/7-1a-check-depth-one-ghost.solution  3
-rw-r--r--  test_cases/q2/7-1a-check-depth-one-ghost.test  83
-rw-r--r--  test_cases/q2/7-1b-check-depth-one-ghost.solution  3
-rw-r--r--  test_cases/q2/7-1b-check-depth-one-ghost.test  83
-rw-r--r--  test_cases/q2/7-1c-check-depth-one-ghost.solution  3
-rw-r--r--  test_cases/q2/7-1c-check-depth-one-ghost.test  83
-rw-r--r--  test_cases/q2/7-2a-check-depth-two-ghosts.solution  3
-rw-r--r--  test_cases/q2/7-2a-check-depth-two-ghosts.test  110
-rw-r--r--  test_cases/q2/7-2b-check-depth-two-ghosts.solution  3
-rw-r--r--  test_cases/q2/7-2b-check-depth-two-ghosts.test  110
-rw-r--r--  test_cases/q2/7-2c-check-depth-two-ghosts.solution  3
-rw-r--r--  test_cases/q2/7-2c-check-depth-two-ghosts.test  110
-rw-r--r--  test_cases/q2/8-pacman-game.solution  444
-rw-r--r--  test_cases/q2/8-pacman-game.test  19
-rw-r--r--  test_cases/q2/CONFIG  2
-rw-r--r--  test_cases/q3/0-lecture-6-tree.solution  3
-rw-r--r--  test_cases/q3/0-lecture-6-tree.test  50
-rw-r--r--  test_cases/q3/0-small-tree.solution  3
-rw-r--r--  test_cases/q3/0-small-tree.test  36
-rw-r--r--  test_cases/q3/1-1-minmax.solution  3
-rw-r--r--  test_cases/q3/1-1-minmax.test  47
-rw-r--r--  test_cases/q3/1-2-minmax.solution  3
-rw-r--r--  test_cases/q3/1-2-minmax.test  47
-rw-r--r--  test_cases/q3/1-3-minmax.solution  3
-rw-r--r--  test_cases/q3/1-3-minmax.test  47
-rw-r--r--  test_cases/q3/1-4-minmax.solution  3
-rw-r--r--  test_cases/q3/1-4-minmax.test  47
-rw-r--r--  test_cases/q3/1-5-minmax.solution  3
-rw-r--r--  test_cases/q3/1-5-minmax.test  75
-rw-r--r--  test_cases/q3/1-6-minmax.solution  3
-rw-r--r--  test_cases/q3/1-6-minmax.test  75
-rw-r--r--  test_cases/q3/1-7-minmax.solution  3
-rw-r--r--  test_cases/q3/1-7-minmax.test  75
-rw-r--r--  test_cases/q3/1-8-minmax.solution  3
-rw-r--r--  test_cases/q3/1-8-minmax.test  75
-rw-r--r--  test_cases/q3/2-1a-vary-depth.solution  3
-rw-r--r--  test_cases/q3/2-1a-vary-depth.test  52
-rw-r--r--  test_cases/q3/2-1b-vary-depth.solution  3
-rw-r--r--  test_cases/q3/2-1b-vary-depth.test  52
-rw-r--r--  test_cases/q3/2-2a-vary-depth.solution  3
-rw-r--r--  test_cases/q3/2-2a-vary-depth.test  52
-rw-r--r--  test_cases/q3/2-2b-vary-depth.solution  3
-rw-r--r--  test_cases/q3/2-2b-vary-depth.test  52
-rw-r--r--  test_cases/q3/2-3a-vary-depth.solution  3
-rw-r--r--  test_cases/q3/2-3a-vary-depth.test  52
-rw-r--r--  test_cases/q3/2-3b-vary-depth.solution  3
-rw-r--r--  test_cases/q3/2-3b-vary-depth.test  52
-rw-r--r--  test_cases/q3/2-4a-vary-depth.solution  3
-rw-r--r--  test_cases/q3/2-4a-vary-depth.test  52
-rw-r--r--  test_cases/q3/2-4b-vary-depth.solution  3
-rw-r--r--  test_cases/q3/2-4b-vary-depth.test  52
-rw-r--r--  test_cases/q3/2-one-ghost-3level.solution  3
-rw-r--r--  test_cases/q3/2-one-ghost-3level.test  52
-rw-r--r--  test_cases/q3/3-one-ghost-4level.solution  3
-rw-r--r--  test_cases/q3/3-one-ghost-4level.test  79
-rw-r--r--  test_cases/q3/4-two-ghosts-3level.solution  3
-rw-r--r--  test_cases/q3/4-two-ghosts-3level.test  52
-rw-r--r--  test_cases/q3/5-two-ghosts-4level.solution  3
-rw-r--r--  test_cases/q3/5-two-ghosts-4level.test  79
-rw-r--r--  test_cases/q3/6-tied-root.solution  3
-rw-r--r--  test_cases/q3/6-tied-root.test  31
-rw-r--r--  test_cases/q3/7-1a-check-depth-one-ghost.solution  3
-rw-r--r--  test_cases/q3/7-1a-check-depth-one-ghost.test  83
-rw-r--r--  test_cases/q3/7-1b-check-depth-one-ghost.solution  3
-rw-r--r--  test_cases/q3/7-1b-check-depth-one-ghost.test  83
-rw-r--r--  test_cases/q3/7-1c-check-depth-one-ghost.solution  3
-rw-r--r--  test_cases/q3/7-1c-check-depth-one-ghost.test  83
-rw-r--r--  test_cases/q3/7-2a-check-depth-two-ghosts.solution  3
-rw-r--r--  test_cases/q3/7-2a-check-depth-two-ghosts.test  110
-rw-r--r--  test_cases/q3/7-2b-check-depth-two-ghosts.solution  3
-rw-r--r--  test_cases/q3/7-2b-check-depth-two-ghosts.test  110
-rw-r--r--  test_cases/q3/7-2c-check-depth-two-ghosts.solution  3
-rw-r--r--  test_cases/q3/7-2c-check-depth-two-ghosts.test  110
-rw-r--r--  test_cases/q3/8-pacman-game.solution  444
-rw-r--r--  test_cases/q3/8-pacman-game.test  19
-rw-r--r--  test_cases/q3/CONFIG  2
-rw-r--r--  test_cases/q4/0-expectimax1.solution  3
-rw-r--r--  test_cases/q4/0-expectimax1.test  48
-rw-r--r--  test_cases/q4/1-expectimax2.solution  3
-rw-r--r--  test_cases/q4/1-expectimax2.test  48
-rw-r--r--  test_cases/q4/2-one-ghost-3level.solution  3
-rw-r--r--  test_cases/q4/2-one-ghost-3level.test  52
-rw-r--r--  test_cases/q4/3-one-ghost-4level.solution  3
-rw-r--r--  test_cases/q4/3-one-ghost-4level.test  79
-rw-r--r--  test_cases/q4/4-two-ghosts-3level.solution  3
-rw-r--r--  test_cases/q4/4-two-ghosts-3level.test  52
-rw-r--r--  test_cases/q4/5-two-ghosts-4level.solution  3
-rw-r--r--  test_cases/q4/5-two-ghosts-4level.test  79
-rw-r--r--  test_cases/q4/6-1a-check-depth-one-ghost.solution  3
-rw-r--r--  test_cases/q4/6-1a-check-depth-one-ghost.test  83
-rw-r--r--  test_cases/q4/6-1b-check-depth-one-ghost.solution  3
-rw-r--r--  test_cases/q4/6-1b-check-depth-one-ghost.test  83
-rw-r--r--  test_cases/q4/6-1c-check-depth-one-ghost.solution  3
-rw-r--r--  test_cases/q4/6-1c-check-depth-one-ghost.test  83
-rw-r--r--  test_cases/q4/6-2a-check-depth-two-ghosts.solution  3
-rw-r--r--  test_cases/q4/6-2a-check-depth-two-ghosts.test  110
-rw-r--r--  test_cases/q4/6-2b-check-depth-two-ghosts.solution  3
-rw-r--r--  test_cases/q4/6-2b-check-depth-two-ghosts.test  110
-rw-r--r--  test_cases/q4/6-2c-check-depth-two-ghosts.solution  3
-rw-r--r--  test_cases/q4/6-2c-check-depth-two-ghosts.test  110
-rw-r--r--  test_cases/q4/7-pacman-game.solution  444
-rw-r--r--  test_cases/q4/7-pacman-game.test  19
-rw-r--r--  test_cases/q4/CONFIG  2
-rw-r--r--  test_cases/q5/CONFIG  2
-rw-r--r--  test_cases/q5/grade-agent.solution  2
-rw-r--r--  test_cases/q5/grade-agent.test  18
-rw-r--r--  textDisplay.py  81
-rw-r--r--  util.py  653
187 files changed, 11693 insertions, 0 deletions
diff --git a/.vscode/launch.json b/.vscode/launch.json
new file mode 100644
index 0000000..e1970b8
--- /dev/null
+++ b/.vscode/launch.json
@@ -0,0 +1,26 @@
+{
+ // Use IntelliSense to learn about possible attributes.
+ // Hover to view descriptions of existing attributes.
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "name": "Reflex Agent",
+ "type": "python",
+ "request": "launch",
+ "program": "pacman.py",
+ "args": ["-p",
+ "ReflexAgent",
+ "-l",
+ "testClassic"
+ ]
+ },
+ {
+ "name": "autograder",
+ "type": "python",
+ "request": "launch",
+ "program": "autograder.py",
+ "args": []
+ }
+ ]
+}
\ No newline at end of file
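
For reference, the two launch configurations above are equivalent to invoking the scripts directly from the project root (a sketch, assuming the same Python interpreter that VS Code is configured to use):

    python pacman.py -p ReflexAgent -l testClassic
    python autograder.py
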
diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..6af849e
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+v1.002
diff --git a/autograder.py b/autograder.py
new file mode 100644
index 0000000..9c11c4c
--- /dev/null
+++ b/autograder.py
@@ -0,0 +1,351 @@
+# autograder.py
+# -------------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+# imports from python standard library
+import grading
+import imp
+import optparse
+import os
+import re
+import sys
+import projectParams
+import random
+random.seed(0)
+try:
+ from pacman import GameState
+except:
+ pass
+
+# register arguments and set default values
+def readCommand(argv):
+ parser = optparse.OptionParser(description = 'Run public tests on student code')
+ parser.set_defaults(generateSolutions=False, edxOutput=False, muteOutput=False, printTestCase=False, noGraphics=False)
+ parser.add_option('--test-directory',
+ dest = 'testRoot',
+ default = 'test_cases',
+ help = 'Root test directory which contains subdirectories corresponding to each question')
+ parser.add_option('--student-code',
+ dest = 'studentCode',
+ default = projectParams.STUDENT_CODE_DEFAULT,
+ help = 'comma separated list of student code files')
+ parser.add_option('--code-directory',
+ dest = 'codeRoot',
+ default = "",
+ help = 'Root directory containing the student and testClass code')
+ parser.add_option('--test-case-code',
+ dest = 'testCaseCode',
+ default = projectParams.PROJECT_TEST_CLASSES,
+ help = 'class containing testClass classes for this project')
+ parser.add_option('--generate-solutions',
+ dest = 'generateSolutions',
+ action = 'store_true',
+ help = 'Write solutions generated to .solution file')
+ parser.add_option('--edx-output',
+ dest = 'edxOutput',
+ action = 'store_true',
+ help = 'Generate edX output files')
+ parser.add_option('--mute',
+ dest = 'muteOutput',
+ action = 'store_true',
+ help = 'Mute output from executing tests')
+ parser.add_option('--print-tests', '-p',
+ dest = 'printTestCase',
+ action = 'store_true',
+ help = 'Print each test case before running them.')
+ parser.add_option('--test', '-t',
+ dest = 'runTest',
+ default = None,
+ help = 'Run one particular test. Relative to test root.')
+ parser.add_option('--question', '-q',
+ dest = 'gradeQuestion',
+ default = None,
+ help = 'Grade one particular question.')
+ parser.add_option('--no-graphics',
+ dest = 'noGraphics',
+ action = 'store_true',
+ help = 'No graphics display for pacman games.')
+ (options, args) = parser.parse_args(argv)
+ return options
+
+
+# confirm we should author solution files
+def confirmGenerate():
+ print 'WARNING: this action will overwrite any solution files.'
+ print 'Are you sure you want to proceed? (yes/no)'
+ while True:
+ ans = sys.stdin.readline().strip()
+ if ans == 'yes':
+ break
+ elif ans == 'no':
+ sys.exit(0)
+ else:
+ print 'please answer either "yes" or "no"'
+
+
+# TODO: Fix this so that tracebacks work correctly.
+# Looking at the source of the traceback module, and presuming it works
+# the same way as the interpreter's, it uses co_filename. This is,
+# however, a read-only attribute.
+def setModuleName(module, filename):
+ functionType = type(confirmGenerate)
+ classType = type(optparse.Option)
+
+ for i in dir(module):
+ o = getattr(module, i)
+ if hasattr(o, '__file__'): continue
+
+ if type(o) == functionType:
+ setattr(o, '__file__', filename)
+ elif type(o) == classType:
+ setattr(o, '__file__', filename)
+ # TODO: assign member __file__'s?
+ #print i, type(o)
+
+
+#from cStringIO import StringIO
+
+def loadModuleString(moduleName, moduleSource):
+ # Below broken, imp doesn't believe it's being passed a file:
+ # ValueError: load_module arg#2 should be a file or None
+ #
+ #f = StringIO(moduleSource)
+ #tmp = imp.load_module(moduleName, f, moduleName, (".py", "r", imp.PY_SOURCE))
+ tmp = imp.new_module(moduleName)
+ exec moduleSource in tmp.__dict__
+ setModuleName(tmp, moduleName)
+ return tmp
+
+import py_compile
+
+def loadModuleFile(moduleName, filePath):
+ with open(filePath, 'r') as f:
+ return imp.load_module(moduleName, f, "%s.py" % moduleName, (".py", "r", imp.PY_SOURCE))
+
+
+def readFile(path, root=""):
+ "Read file from disk at specified path and return as string"
+ with open(os.path.join(root, path), 'r') as handle:
+ return handle.read()
+
+
+#######################################################################
+# Error Hint Map
+#######################################################################
+
+# TODO: use these
+ERROR_HINT_MAP = {
+ 'q1': {
+ "<type 'exceptions.IndexError'>": """
+ We noticed that your project threw an IndexError on q1.
+ While many things may cause this, it may have been from
+ assuming a certain number of successors from a state space
+ or assuming a certain number of actions available from a given
+ state. Try making your code more general (no hardcoded indices)
+ and submit again!
+ """
+ },
+ 'q3': {
+ "<type 'exceptions.AttributeError'>": """
+ We noticed that your project threw an AttributeError on q3.
+ While many things may cause this, it may have been from assuming
+ a certain size or structure to the state space. For example, if you have
+ a line of code assuming that the state is (x, y) and we run your code
+ on a state space with (x, y, z), this error could be thrown. Try
+ making your code more general and submit again!
+
+ """
+ }
+}
+
+import pprint
+
+def splitStrings(d):
+ d2 = dict(d)
+ for k in d:
+ if k[0:2] == "__":
+ del d2[k]
+ continue
+ if d2[k].find("\n") >= 0:
+ d2[k] = d2[k].split("\n")
+ return d2
+
+
+def printTest(testDict, solutionDict):
+ pp = pprint.PrettyPrinter(indent=4)
+ print "Test case:"
+ for line in testDict["__raw_lines__"]:
+ print " |", line
+ print "Solution:"
+ for line in solutionDict["__raw_lines__"]:
+ print " |", line
+
+
+def runTest(testName, moduleDict, printTestCase=False, display=None):
+ import testParser
+ import testClasses
+ for module in moduleDict:
+ setattr(sys.modules[__name__], module, moduleDict[module])
+
+ testDict = testParser.TestParser(testName + ".test").parse()
+ solutionDict = testParser.TestParser(testName + ".solution").parse()
+ test_out_file = os.path.join('%s.test_output' % testName)
+ testDict['test_out_file'] = test_out_file
+ testClass = getattr(projectTestClasses, testDict['class'])
+
+ questionClass = getattr(testClasses, 'Question')
+ question = questionClass({'max_points': 0}, display)
+ testCase = testClass(question, testDict)
+
+ if printTestCase:
+ printTest(testDict, solutionDict)
+
+ # This is a fragile hack to create a stub grades object
+ grades = grading.Grades(projectParams.PROJECT_NAME, [(None,0)])
+ testCase.execute(grades, moduleDict, solutionDict)
+
+
+# returns all the tests that need to run in order to grade the given question
+def getDepends(testParser, testRoot, question):
+ allDeps = [question]
+ questionDict = testParser.TestParser(os.path.join(testRoot, question, 'CONFIG')).parse()
+ if 'depends' in questionDict:
+ depends = questionDict['depends'].split()
+ for d in depends:
+ # run dependencies first
+ allDeps = getDepends(testParser, testRoot, d) + allDeps
+ return allDeps
+
+# get list of questions to grade
+def getTestSubdirs(testParser, testRoot, questionToGrade):
+ problemDict = testParser.TestParser(os.path.join(testRoot, 'CONFIG')).parse()
+ if questionToGrade != None:
+ questions = getDepends(testParser, testRoot, questionToGrade)
+ if len(questions) > 1:
+ print 'Note: due to dependencies, the following tests will be run: %s' % ' '.join(questions)
+ return questions
+ if 'order' in problemDict:
+ return problemDict['order'].split()
+ return sorted(os.listdir(testRoot))
+
+
+# evaluate student code
+def evaluate(generateSolutions, testRoot, moduleDict, exceptionMap=ERROR_HINT_MAP, edxOutput=False, muteOutput=False,
+ printTestCase=False, questionToGrade=None, display=None):
+ # imports of testbench code. note that the testClasses import must follow
+ # the import of student code due to dependencies
+ import testParser
+ import testClasses
+ for module in moduleDict:
+ setattr(sys.modules[__name__], module, moduleDict[module])
+
+ questions = []
+ questionDicts = {}
+ test_subdirs = getTestSubdirs(testParser, testRoot, questionToGrade)
+ for q in test_subdirs:
+ subdir_path = os.path.join(testRoot, q)
+ if not os.path.isdir(subdir_path) or q[0] == '.':
+ continue
+
+ # create a question object
+ questionDict = testParser.TestParser(os.path.join(subdir_path, 'CONFIG')).parse()
+ questionClass = getattr(testClasses, questionDict['class'])
+ question = questionClass(questionDict, display)
+ questionDicts[q] = questionDict
+
+ # load test cases into question
+ tests = filter(lambda t: re.match('[^#~.].*\.test\Z', t), os.listdir(subdir_path))
+ tests = map(lambda t: re.match('(.*)\.test\Z', t).group(1), tests)
+ for t in sorted(tests):
+ test_file = os.path.join(subdir_path, '%s.test' % t)
+ solution_file = os.path.join(subdir_path, '%s.solution' % t)
+ test_out_file = os.path.join(subdir_path, '%s.test_output' % t)
+ testDict = testParser.TestParser(test_file).parse()
+ if testDict.get("disabled", "false").lower() == "true":
+ continue
+ testDict['test_out_file'] = test_out_file
+ testClass = getattr(projectTestClasses, testDict['class'])
+ testCase = testClass(question, testDict)
+ def makefun(testCase, solution_file):
+ if generateSolutions:
+ # write solution file to disk
+ return lambda grades: testCase.writeSolution(moduleDict, solution_file)
+ else:
+ # read in solution dictionary and pass as an argument
+ testDict = testParser.TestParser(test_file).parse()
+ solutionDict = testParser.TestParser(solution_file).parse()
+ if printTestCase:
+ return lambda grades: printTest(testDict, solutionDict) or testCase.execute(grades, moduleDict, solutionDict)
+ else:
+ return lambda grades: testCase.execute(grades, moduleDict, solutionDict)
+ question.addTestCase(testCase, makefun(testCase, solution_file))
+
+ # Note extra function is necessary for scoping reasons
+ def makefun(question):
+ return lambda grades: question.execute(grades)
+ setattr(sys.modules[__name__], q, makefun(question))
+ questions.append((q, question.getMaxPoints()))
+
+ grades = grading.Grades(projectParams.PROJECT_NAME, questions, edxOutput=edxOutput, muteOutput=muteOutput)
+ if questionToGrade == None:
+ for q in questionDicts:
+ for prereq in questionDicts[q].get('depends', '').split():
+ grades.addPrereq(q, prereq)
+
+ grades.grade(sys.modules[__name__], bonusPic = projectParams.BONUS_PIC)
+ return grades.points
+
+
+
+def getDisplay(graphicsByDefault, options=None):
+ graphics = graphicsByDefault
+ if options is not None and options.noGraphics:
+ graphics = False
+ if graphics:
+ try:
+ import graphicsDisplay
+ return graphicsDisplay.PacmanGraphics(1, frameTime=.05)
+ except ImportError:
+ pass
+ import textDisplay
+ return textDisplay.NullGraphics()
+
+
+
+
+if __name__ == '__main__':
+ options = readCommand(sys.argv)
+ if options.generateSolutions:
+ confirmGenerate()
+ codePaths = options.studentCode.split(',')
+ # moduleCodeDict = {}
+ # for cp in codePaths:
+ # moduleName = re.match('.*?([^/]*)\.py', cp).group(1)
+ # moduleCodeDict[moduleName] = readFile(cp, root=options.codeRoot)
+ # moduleCodeDict['projectTestClasses'] = readFile(options.testCaseCode, root=options.codeRoot)
+ # moduleDict = loadModuleDict(moduleCodeDict)
+
+ moduleDict = {}
+ for cp in codePaths:
+ moduleName = re.match('.*?([^/]*)\.py', cp).group(1)
+ moduleDict[moduleName] = loadModuleFile(moduleName, os.path.join(options.codeRoot, cp))
+ moduleName = re.match('.*?([^/]*)\.py', options.testCaseCode).group(1)
+ moduleDict['projectTestClasses'] = loadModuleFile(moduleName, os.path.join(options.codeRoot, options.testCaseCode))
+
+
+ if options.runTest != None:
+ runTest(options.runTest, moduleDict, printTestCase=options.printTestCase, display=getDisplay(True, options))
+ else:
+ evaluate(options.generateSolutions, options.testRoot, moduleDict,
+ edxOutput=options.edxOutput, muteOutput=options.muteOutput, printTestCase=options.printTestCase,
+ questionToGrade=options.gradeQuestion, display=getDisplay(options.gradeQuestion!=None, options))
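
The option parser above registers the autograder's command-line flags. A few typical invocations, sketched from those options (the question name and test path are examples taken from this commit's test_cases tree):

    python autograder.py -q q2 --no-graphics
    python autograder.py -t test_cases/q2/0-small-tree --print-tests
    python autograder.py --generate-solutions

The first grades a single question (plus any questions its CONFIG lists under 'depends') without the Pacman display; the second runs one test, named without its .test extension; the third regenerates .solution files after an interactive yes/no confirmation.
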
diff --git a/game.py b/game.py
new file mode 100644
index 0000000..e34d6cf
--- /dev/null
+++ b/game.py
@@ -0,0 +1,729 @@
+# game.py
+# -------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+# game.py
+# -------
+# Licensing Information: Please do not distribute or publish solutions to this
+# project. You are free to use and extend these projects for educational
+# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
+# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
+
+from util import *
+import time, os
+import traceback
+import sys
+
+#######################
+# Parts worth reading #
+#######################
+
+class Agent:
+ """
+ An agent must define a getAction method, but may also define the
+ following methods which will be called if they exist:
+
+ def registerInitialState(self, state): # inspects the starting state
+ """
+ def __init__(self, index=0):
+ self.index = index
+
+ def getAction(self, state):
+ """
+ The Agent will receive a GameState (from either {pacman, capture, sonar}.py) and
+ must return an action from Directions.{North, South, East, West, Stop}
+ """
+ raiseNotDefined()
+
+class Directions:
+ NORTH = 'North'
+ SOUTH = 'South'
+ EAST = 'East'
+ WEST = 'West'
+ STOP = 'Stop'
+
+ LEFT = {NORTH: WEST,
+ SOUTH: EAST,
+ EAST: NORTH,
+ WEST: SOUTH,
+ STOP: STOP}
+
+ RIGHT = dict([(y,x) for x, y in LEFT.items()])
+
+ REVERSE = {NORTH: SOUTH,
+ SOUTH: NORTH,
+ EAST: WEST,
+ WEST: EAST,
+ STOP: STOP}
+
+class Configuration:
+ """
+ A Configuration holds the (x,y) coordinate of a character, along with its
+ traveling direction.
+
+ The convention for positions, like a graph, is that (0,0) is the lower left corner, x increases
+ horizontally and y increases vertically. Therefore, north is the direction of increasing y, or (0,1).
+ """
+
+ def __init__(self, pos, direction):
+ self.pos = pos
+ self.direction = direction
+
+ def getPosition(self):
+ return (self.pos)
+
+ def getDirection(self):
+ return self.direction
+
+ def isInteger(self):
+ x,y = self.pos
+ return x == int(x) and y == int(y)
+
+ def __eq__(self, other):
+ if other == None: return False
+ return (self.pos == other.pos and self.direction == other.direction)
+
+ def __hash__(self):
+ x = hash(self.pos)
+ y = hash(self.direction)
+ return hash(x + 13 * y)
+
+ def __str__(self):
+ return "(x,y)="+str(self.pos)+", "+str(self.direction)
+
+ def generateSuccessor(self, vector):
+ """
+ Generates a new configuration reached by translating the current
+ configuration by the action vector. This is a low-level call and does
+ not attempt to respect the legality of the movement.
+
+ Actions are movement vectors.
+ """
+ x, y= self.pos
+ dx, dy = vector
+ direction = Actions.vectorToDirection(vector)
+ if direction == Directions.STOP:
+ direction = self.direction # There is no stop direction
+ return Configuration((x + dx, y+dy), direction)
+
+class AgentState:
+ """
+ AgentStates hold the state of an agent (configuration, speed, scared, etc).
+ """
+
+ def __init__( self, startConfiguration, isPacman ):
+ self.start = startConfiguration
+ self.configuration = startConfiguration
+ self.isPacman = isPacman
+ self.scaredTimer = 0
+ self.numCarrying = 0
+ self.numReturned = 0
+
+ def __str__( self ):
+ if self.isPacman:
+ return "Pacman: " + str( self.configuration )
+ else:
+ return "Ghost: " + str( self.configuration )
+
+ def __eq__( self, other ):
+ if other == None:
+ return False
+ return self.configuration == other.configuration and self.scaredTimer == other.scaredTimer
+
+ def __hash__(self):
+ return hash(hash(self.configuration) + 13 * hash(self.scaredTimer))
+
+ def copy( self ):
+ state = AgentState( self.start, self.isPacman )
+ state.configuration = self.configuration
+ state.scaredTimer = self.scaredTimer
+ state.numCarrying = self.numCarrying
+ state.numReturned = self.numReturned
+ return state
+
+ def getPosition(self):
+ if self.configuration == None: return None
+ return self.configuration.getPosition()
+
+ def getDirection(self):
+ return self.configuration.getDirection()
+
+class Grid:
+ """
+ A 2-dimensional array of objects backed by a list of lists. Data is accessed
+ via grid[x][y] where (x,y) are positions on a Pacman map with x horizontal,
+ y vertical and the origin (0,0) in the bottom left corner.
+
+ The __str__ method constructs an output that is oriented like a pacman board.
+ """
+ def __init__(self, width, height, initialValue=False, bitRepresentation=None):
+ if initialValue not in [False, True]: raise Exception('Grids can only contain booleans')
+ self.CELLS_PER_INT = 30
+
+ self.width = width
+ self.height = height
+ self.data = [[initialValue for y in range(height)] for x in range(width)]
+ if bitRepresentation:
+ self._unpackBits(bitRepresentation)
+
+ def __getitem__(self, i):
+ return self.data[i]
+
+ def __setitem__(self, key, item):
+ self.data[key] = item
+
+ def __str__(self):
+ out = [[str(self.data[x][y])[0] for x in range(self.width)] for y in range(self.height)]
+ out.reverse()
+ return '\n'.join([''.join(x) for x in out])
+
+ def __eq__(self, other):
+ if other == None: return False
+ return self.data == other.data
+
+ def __hash__(self):
+ # return hash(str(self))
+ base = 1
+ h = 0
+ for l in self.data:
+ for i in l:
+ if i:
+ h += base
+ base *= 2
+ return hash(h)
+
+ def copy(self):
+ g = Grid(self.width, self.height)
+ g.data = [x[:] for x in self.data]
+ return g
+
+ def deepCopy(self):
+ return self.copy()
+
+ def shallowCopy(self):
+ g = Grid(self.width, self.height)
+ g.data = self.data
+ return g
+
+ def count(self, item =True ):
+ return sum([x.count(item) for x in self.data])
+
+ def asList(self, key = True):
+ list = []
+ for x in range(self.width):
+ for y in range(self.height):
+ if self[x][y] == key: list.append( (x,y) )
+ return list
+
+ def packBits(self):
+ """
+ Returns an efficient int list representation
+
+ (width, height, bitPackedInts...)
+ """
+ bits = [self.width, self.height]
+ currentInt = 0
+ for i in range(self.height * self.width):
+ bit = self.CELLS_PER_INT - (i % self.CELLS_PER_INT) - 1
+ x, y = self._cellIndexToPosition(i)
+ if self[x][y]:
+ currentInt += 2 ** bit
+ if (i + 1) % self.CELLS_PER_INT == 0:
+ bits.append(currentInt)
+ currentInt = 0
+ bits.append(currentInt)
+ return tuple(bits)
+
+ def _cellIndexToPosition(self, index):
+ x = index / self.height
+ y = index % self.height
+ return x, y
+
+ def _unpackBits(self, bits):
+ """
+ Fills in data from a bit-level representation
+ """
+ cell = 0
+ for packed in bits:
+ for bit in self._unpackInt(packed, self.CELLS_PER_INT):
+ if cell == self.width * self.height: break
+ x, y = self._cellIndexToPosition(cell)
+ self[x][y] = bit
+ cell += 1
+
+ def _unpackInt(self, packed, size):
+ bools = []
+ if packed < 0: raise ValueError, "must be a positive integer"
+ for i in range(size):
+ n = 2 ** (self.CELLS_PER_INT - i - 1)
+ if packed >= n:
+ bools.append(True)
+ packed -= n
+ else:
+ bools.append(False)
+ return bools
+
+def reconstituteGrid(bitRep):
+ if type(bitRep) is not type((1,2)):
+ return bitRep
+ width, height = bitRep[:2]
+ return Grid(width, height, bitRepresentation= bitRep[2:])
+
+####################################
+# Parts you shouldn't have to read #
+####################################
+
+class Actions:
+ """
+ A collection of static methods for manipulating move actions.
+ """
+ # Directions
+ _directions = {Directions.NORTH: (0, 1),
+ Directions.SOUTH: (0, -1),
+ Directions.EAST: (1, 0),
+ Directions.WEST: (-1, 0),
+ Directions.STOP: (0, 0)}
+
+ _directionsAsList = _directions.items()
+
+ TOLERANCE = .001
+
+ def reverseDirection(action):
+ if action == Directions.NORTH:
+ return Directions.SOUTH
+ if action == Directions.SOUTH:
+ return Directions.NORTH
+ if action == Directions.EAST:
+ return Directions.WEST
+ if action == Directions.WEST:
+ return Directions.EAST
+ return action
+ reverseDirection = staticmethod(reverseDirection)
+
+ def vectorToDirection(vector):
+ dx, dy = vector
+ if dy > 0:
+ return Directions.NORTH
+ if dy < 0:
+ return Directions.SOUTH
+ if dx < 0:
+ return Directions.WEST
+ if dx > 0:
+ return Directions.EAST
+ return Directions.STOP
+ vectorToDirection = staticmethod(vectorToDirection)
+
+ def directionToVector(direction, speed = 1.0):
+ dx, dy = Actions._directions[direction]
+ return (dx * speed, dy * speed)
+ directionToVector = staticmethod(directionToVector)
+
+ def getPossibleActions(config, walls):
+ possible = []
+ x, y = config.pos
+ x_int, y_int = int(x + 0.5), int(y + 0.5)
+
+ # In between grid points, all agents must continue straight
+ if (abs(x - x_int) + abs(y - y_int) > Actions.TOLERANCE):
+ return [config.getDirection()]
+
+ for dir, vec in Actions._directionsAsList:
+ dx, dy = vec
+ next_y = y_int + dy
+ next_x = x_int + dx
+ if not walls[next_x][next_y]: possible.append(dir)
+
+ return possible
+
+ getPossibleActions = staticmethod(getPossibleActions)
+
+ def getLegalNeighbors(position, walls):
+ x,y = position
+ x_int, y_int = int(x + 0.5), int(y + 0.5)
+ neighbors = []
+ for dir, vec in Actions._directionsAsList:
+ dx, dy = vec
+ next_x = x_int + dx
+ if next_x < 0 or next_x == walls.width: continue
+ next_y = y_int + dy
+ if next_y < 0 or next_y == walls.height: continue
+ if not walls[next_x][next_y]: neighbors.append((next_x, next_y))
+ return neighbors
+ getLegalNeighbors = staticmethod(getLegalNeighbors)
+
+ def getSuccessor(position, action):
+ dx, dy = Actions.directionToVector(action)
+ x, y = position
+ return (x + dx, y + dy)
+ getSuccessor = staticmethod(getSuccessor)
+
+class GameStateData:
+ """
+
+ """
+ def __init__( self, prevState = None ):
+ """
+ Generates a new data packet by copying information from its predecessor.
+ """
+ if prevState != None:
+ self.food = prevState.food.shallowCopy()
+ self.capsules = prevState.capsules[:]
+ self.agentStates = self.copyAgentStates( prevState.agentStates )
+ self.layout = prevState.layout
+ self._eaten = prevState._eaten
+ self.score = prevState.score
+
+ self._foodEaten = None
+ self._foodAdded = None
+ self._capsuleEaten = None
+ self._agentMoved = None
+ self._lose = False
+ self._win = False
+ self.scoreChange = 0
+
+ def deepCopy( self ):
+ state = GameStateData( self )
+ state.food = self.food.deepCopy()
+ state.layout = self.layout.deepCopy()
+ state._agentMoved = self._agentMoved
+ state._foodEaten = self._foodEaten
+ state._foodAdded = self._foodAdded
+ state._capsuleEaten = self._capsuleEaten
+ return state
+
+ def copyAgentStates( self, agentStates ):
+ copiedStates = []
+ for agentState in agentStates:
+ copiedStates.append( agentState.copy() )
+ return copiedStates
+
+ def __eq__( self, other ):
+ """
+ Allows two states to be compared.
+ """
+ if other == None: return False
+ # TODO Check for type of other
+ if not self.agentStates == other.agentStates: return False
+ if not self.food == other.food: return False
+ if not self.capsules == other.capsules: return False
+ if not self.score == other.score: return False
+ return True
+
+ def __hash__( self ):
+ """
+ Allows states to be keys of dictionaries.
+ """
+ for i, state in enumerate( self.agentStates ):
+ try:
+ int(hash(state))
+ except TypeError, e:
+ print e
+ #hash(state)
+ return int((hash(tuple(self.agentStates)) + 13*hash(self.food) + 113* hash(tuple(self.capsules)) + 7 * hash(self.score)) % 1048575 )
+
+ def __str__( self ):
+ width, height = self.layout.width, self.layout.height
+ map = Grid(width, height)
+ if type(self.food) == type((1,2)):
+ self.food = reconstituteGrid(self.food)
+ for x in range(width):
+ for y in range(height):
+ food, walls = self.food, self.layout.walls
+ map[x][y] = self._foodWallStr(food[x][y], walls[x][y])
+
+ for agentState in self.agentStates:
+ if agentState == None: continue
+ if agentState.configuration == None: continue
+ x,y = [int( i ) for i in nearestPoint( agentState.configuration.pos )]
+ agent_dir = agentState.configuration.direction
+ if agentState.isPacman:
+ map[x][y] = self._pacStr( agent_dir )
+ else:
+ map[x][y] = self._ghostStr( agent_dir )
+
+ for x, y in self.capsules:
+ map[x][y] = 'o'
+
+ return str(map) + ("\nScore: %d\n" % self.score)
+
+ def _foodWallStr( self, hasFood, hasWall ):
+ if hasFood:
+ return '.'
+ elif hasWall:
+ return '%'
+ else:
+ return ' '
+
+ def _pacStr( self, dir ):
+ if dir == Directions.NORTH:
+ return 'v'
+ if dir == Directions.SOUTH:
+ return '^'
+ if dir == Directions.WEST:
+ return '>'
+ return '<'
+
+ def _ghostStr( self, dir ):
+ return 'G'
+ if dir == Directions.NORTH:
+ return 'M'
+ if dir == Directions.SOUTH:
+ return 'W'
+ if dir == Directions.WEST:
+ return '3'
+ return 'E'
+
+ def initialize( self, layout, numGhostAgents ):
+ """
+ Creates an initial game state from a layout array (see layout.py).
+ """
+ self.food = layout.food.copy()
+ #self.capsules = []
+ self.capsules = layout.capsules[:]
+ self.layout = layout
+ self.score = 0
+ self.scoreChange = 0
+
+ self.agentStates = []
+ numGhosts = 0
+ for isPacman, pos in layout.agentPositions:
+ if not isPacman:
+ if numGhosts == numGhostAgents: continue # Max ghosts reached already
+ else: numGhosts += 1
+ self.agentStates.append( AgentState( Configuration( pos, Directions.STOP), isPacman) )
+ self._eaten = [False for a in self.agentStates]
+
+try:
+ import boinc
+ _BOINC_ENABLED = True
+except:
+ _BOINC_ENABLED = False
+
+class Game:
+ """
+ The Game manages the control flow, soliciting actions from agents.
+ """
+
+ def __init__( self, agents, display, rules, startingIndex=0, muteAgents=False, catchExceptions=False ):
+ self.agentCrashed = False
+ self.agents = agents
+ self.display = display
+ self.rules = rules
+ self.startingIndex = startingIndex
+ self.gameOver = False
+ self.muteAgents = muteAgents
+ self.catchExceptions = catchExceptions
+ self.moveHistory = []
+ self.totalAgentTimes = [0 for agent in agents]
+ self.totalAgentTimeWarnings = [0 for agent in agents]
+ self.agentTimeout = False
+ import cStringIO
+ self.agentOutput = [cStringIO.StringIO() for agent in agents]
+
+ def getProgress(self):
+ if self.gameOver:
+ return 1.0
+ else:
+ return self.rules.getProgress(self)
+
+ def _agentCrash( self, agentIndex, quiet=False):
+ "Helper method for handling agent crashes"
+ if not quiet: traceback.print_exc()
+ self.gameOver = True
+ self.agentCrashed = True
+ self.rules.agentCrash(self, agentIndex)
+
+ OLD_STDOUT = None
+ OLD_STDERR = None
+
+ def mute(self, agentIndex):
+ if not self.muteAgents: return
+ global OLD_STDOUT, OLD_STDERR
+ import cStringIO
+ OLD_STDOUT = sys.stdout
+ OLD_STDERR = sys.stderr
+ sys.stdout = self.agentOutput[agentIndex]
+ sys.stderr = self.agentOutput[agentIndex]
+
+ def unmute(self):
+ if not self.muteAgents: return
+ global OLD_STDOUT, OLD_STDERR
+ # Revert stdout/stderr to originals
+ sys.stdout = OLD_STDOUT
+ sys.stderr = OLD_STDERR
+
+
+ def run( self ):
+ """
+ Main control loop for game play.
+ """
+ self.display.initialize(self.state.data)
+ self.numMoves = 0
+
+ ###self.display.initialize(self.state.makeObservation(1).data)
+ # inform learning agents of the game start
+ for i in range(len(self.agents)):
+ agent = self.agents[i]
+ if not agent:
+ self.mute(i)
+ # this is a null agent, meaning it failed to load
+ # the other team wins
+ print >>sys.stderr, "Agent %d failed to load" % i
+ self.unmute()
+ self._agentCrash(i, quiet=True)
+ return
+ if ("registerInitialState" in dir(agent)):
+ self.mute(i)
+ if self.catchExceptions:
+ try:
+ timed_func = TimeoutFunction(agent.registerInitialState, int(self.rules.getMaxStartupTime(i)))
+ try:
+ start_time = time.time()
+ timed_func(self.state.deepCopy())
+ time_taken = time.time() - start_time
+ self.totalAgentTimes[i] += time_taken
+ except TimeoutFunctionException:
+ print >>sys.stderr, "Agent %d ran out of time on startup!" % i
+ self.unmute()
+ self.agentTimeout = True
+ self._agentCrash(i, quiet=True)
+ return
+ except Exception,data:
+ self._agentCrash(i, quiet=False)
+ self.unmute()
+ return
+ else:
+ agent.registerInitialState(self.state.deepCopy())
+ ## TODO: could this exceed the total time
+ self.unmute()
+
+ agentIndex = self.startingIndex
+ numAgents = len( self.agents )
+
+ while not self.gameOver:
+ # Fetch the next agent
+ agent = self.agents[agentIndex]
+ move_time = 0
+ skip_action = False
+ # Generate an observation of the state
+ if 'observationFunction' in dir( agent ):
+ self.mute(agentIndex)
+ if self.catchExceptions:
+ try:
+ timed_func = TimeoutFunction(agent.observationFunction, int(self.rules.getMoveTimeout(agentIndex)))
+ try:
+ start_time = time.time()
+ observation = timed_func(self.state.deepCopy())
+ except TimeoutFunctionException:
+ skip_action = True
+ move_time += time.time() - start_time
+ self.unmute()
+ except Exception,data:
+ self._agentCrash(agentIndex, quiet=False)
+ self.unmute()
+ return
+ else:
+ observation = agent.observationFunction(self.state.deepCopy())
+ self.unmute()
+ else:
+ observation = self.state.deepCopy()
+
+ # Solicit an action
+ action = None
+ self.mute(agentIndex)
+ if self.catchExceptions:
+ try:
+ timed_func = TimeoutFunction(agent.getAction, int(self.rules.getMoveTimeout(agentIndex)) - int(move_time))
+ try:
+ start_time = time.time()
+ if skip_action:
+ raise TimeoutFunctionException()
+ action = timed_func( observation )
+ except TimeoutFunctionException:
+ print >>sys.stderr, "Agent %d timed out on a single move!" % agentIndex
+ self.agentTimeout = True
+ self._agentCrash(agentIndex, quiet=True)
+ self.unmute()
+ return
+
+ move_time += time.time() - start_time
+
+ if move_time > self.rules.getMoveWarningTime(agentIndex):
+ self.totalAgentTimeWarnings[agentIndex] += 1
+ print >>sys.stderr, "Agent %d took too long to make a move! This is warning %d" % (agentIndex, self.totalAgentTimeWarnings[agentIndex])
+ if self.totalAgentTimeWarnings[agentIndex] > self.rules.getMaxTimeWarnings(agentIndex):
+ print >>sys.stderr, "Agent %d exceeded the maximum number of warnings: %d" % (agentIndex, self.totalAgentTimeWarnings[agentIndex])
+ self.agentTimeout = True
+ self._agentCrash(agentIndex, quiet=True)
+ self.unmute()
+ return
+
+ self.totalAgentTimes[agentIndex] += move_time
+ #print "Agent: %d, time: %f, total: %f" % (agentIndex, move_time, self.totalAgentTimes[agentIndex])
+ if self.totalAgentTimes[agentIndex] > self.rules.getMaxTotalTime(agentIndex):
+ print >>sys.stderr, "Agent %d ran out of time! (time: %1.2f)" % (agentIndex, self.totalAgentTimes[agentIndex])
+ self.agentTimeout = True
+ self._agentCrash(agentIndex, quiet=True)
+ self.unmute()
+ return
+ self.unmute()
+ except Exception,data:
+ self._agentCrash(agentIndex)
+ self.unmute()
+ return
+ else:
+ action = agent.getAction(observation)
+ self.unmute()
+
+ # Execute the action
+ self.moveHistory.append( (agentIndex, action) )
+ if self.catchExceptions:
+ try:
+ self.state = self.state.generateSuccessor( agentIndex, action )
+ except Exception,data:
+ self.mute(agentIndex)
+ self._agentCrash(agentIndex)
+ self.unmute()
+ return
+ else:
+ self.state = self.state.generateSuccessor( agentIndex, action )
+
+ # Change the display
+ self.display.update( self.state.data )
+ ###idx = agentIndex - agentIndex % 2 + 1
+ ###self.display.update( self.state.makeObservation(idx).data )
+
+ # Allow for game specific conditions (winning, losing, etc.)
+ self.rules.process(self.state, self)
+ # Track progress
+ if agentIndex == numAgents + 1: self.numMoves += 1
+ # Next agent
+ agentIndex = ( agentIndex + 1 ) % numAgents
+
+ if _BOINC_ENABLED:
+ boinc.set_fraction_done(self.getProgress())
+
+ # inform a learning agent of the game result
+ for agentIndex, agent in enumerate(self.agents):
+ if "final" in dir( agent ) :
+ try:
+ self.mute(agentIndex)
+ agent.final( self.state )
+ self.unmute()
+ except Exception,data:
+ if not self.catchExceptions: raise
+ self._agentCrash(agentIndex)
+ self.unmute()
+ return
+ self.display.finish()
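
game.py above defines the Agent contract: getAction receives a GameState and must return one of Directions.{North, South, East, West, Stop}. A minimal sketch of a conforming agent (hypothetical, not part of this commit):

    from game import Agent, Directions

    class StopAgent(Agent):
        "Hypothetical agent that satisfies the contract by always standing still."
        def getAction(self, state):
            return Directions.STOP
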
diff --git a/ghostAgents.py b/ghostAgents.py
new file mode 100644
index 0000000..c3afe1f
--- /dev/null
+++ b/ghostAgents.py
@@ -0,0 +1,81 @@
+# ghostAgents.py
+# --------------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+from game import Agent
+from game import Actions
+from game import Directions
+import random
+from util import manhattanDistance
+import util
+
+class GhostAgent( Agent ):
+ def __init__( self, index ):
+ self.index = index
+
+ def getAction( self, state ):
+ dist = self.getDistribution(state)
+ if len(dist) == 0:
+ return Directions.STOP
+ else:
+ return util.chooseFromDistribution( dist )
+
+ def getDistribution(self, state):
+ "Returns a Counter encoding a distribution over actions from the provided state."
+ util.raiseNotDefined()
+
+class RandomGhost( GhostAgent ):
+ "A ghost that chooses a legal action uniformly at random."
+ def getDistribution( self, state ):
+ dist = util.Counter()
+ for a in state.getLegalActions( self.index ): dist[a] = 1.0
+ dist.normalize()
+ return dist
+
+class DirectionalGhost( GhostAgent ):
+ "A ghost that prefers to rush Pacman, or flee when scared."
+ def __init__( self, index, prob_attack=0.8, prob_scaredFlee=0.8 ):
+ self.index = index
+ self.prob_attack = prob_attack
+ self.prob_scaredFlee = prob_scaredFlee
+
+ def getDistribution( self, state ):
+ # Read variables from state
+ ghostState = state.getGhostState( self.index )
+ legalActions = state.getLegalActions( self.index )
+ pos = state.getGhostPosition( self.index )
+ isScared = ghostState.scaredTimer > 0
+
+ speed = 1
+ if isScared: speed = 0.5
+
+ actionVectors = [Actions.directionToVector( a, speed ) for a in legalActions]
+ newPositions = [( pos[0]+a[0], pos[1]+a[1] ) for a in actionVectors]
+ pacmanPosition = state.getPacmanPosition()
+
+ # Select best actions given the state
+ distancesToPacman = [manhattanDistance( pos, pacmanPosition ) for pos in newPositions]
+ if isScared:
+ bestScore = max( distancesToPacman )
+ bestProb = self.prob_scaredFlee
+ else:
+ bestScore = min( distancesToPacman )
+ bestProb = self.prob_attack
+ bestActions = [action for action, distance in zip( legalActions, distancesToPacman ) if distance == bestScore]
+
+ # Construct distribution
+ dist = util.Counter()
+ for a in bestActions: dist[a] = bestProb / len(bestActions)
+ for a in legalActions: dist[a] += ( 1-bestProb ) / len(legalActions)
+ dist.normalize()
+ return dist
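
DirectionalGhost above mixes a "best action" bonus with a uniform floor over all legal actions. A quick numeric check of that mixture (illustrative values, not part of this commit):

    prob_attack = 0.8
    num_legal, num_best = 4, 2   # four legal moves, two tied for closest to Pacman
    best_weight = prob_attack / num_best + (1 - prob_attack) / num_legal   # 0.45 each
    other_weight = (1 - prob_attack) / num_legal                           # 0.05 each
    assert abs(num_best * best_weight + (num_legal - num_best) * other_weight - 1.0) < 1e-9

The weights already sum to 1, so the final normalize() call only guards against rounding.
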
diff --git a/grading.py b/grading.py
new file mode 100644
index 0000000..0ef07a9
--- /dev/null
+++ b/grading.py
@@ -0,0 +1,282 @@
+# grading.py
+# ----------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+"Common code for autograders"
+
+import cgi
+import time
+import sys
+import traceback
+import pdb
+from collections import defaultdict
+import util
+
+class Grades:
+ "A data structure for project grades, along with formatting code to display them"
+ def __init__(self, projectName, questionsAndMaxesList, edxOutput=False, muteOutput=False):
+ """
+ Defines the grading scheme for a project
+ projectName: project name
+ questionsAndMaxesList: a list of (question name, max points) pairs, one per question
+ """
+ self.questions = [el[0] for el in questionsAndMaxesList]
+ self.maxes = dict(questionsAndMaxesList)
+ self.points = Counter()
+ self.messages = dict([(q, []) for q in self.questions])
+ self.project = projectName
+ self.start = time.localtime()[1:6]
+ self.sane = True # Sanity checks
+ self.currentQuestion = None # Which question we're grading
+ self.edxOutput = edxOutput
+ self.mute = muteOutput
+ self.prereqs = defaultdict(set)
+
+ #print 'Autograder transcript for %s' % self.project
+ print 'Starting on %d-%d at %d:%02d:%02d' % self.start
+
+ def addPrereq(self, question, prereq):
+ self.prereqs[question].add(prereq)
+
+ def grade(self, gradingModule, exceptionMap = {}, bonusPic = False):
+ """
+ Grades each question
+ gradingModule: the module with all the grading functions (pass in with sys.modules[__name__])
+ """
+
+ completedQuestions = set([])
+ for q in self.questions:
+ print '\nQuestion %s' % q
+ print '=' * (9 + len(q))
+ print
+ self.currentQuestion = q
+
+ incompleted = self.prereqs[q].difference(completedQuestions)
+ if len(incompleted) > 0:
+ prereq = incompleted.pop()
+ print \
+"""*** NOTE: Make sure to complete Question %s before working on Question %s,
+*** because Question %s builds upon your answer for Question %s.
+""" % (prereq, q, q, prereq)
+ continue
+
+ if self.mute: util.mutePrint()
+ try:
+ util.TimeoutFunction(getattr(gradingModule, q),300)(self) # Call the question's function
+ #TimeoutFunction(getattr(gradingModule, q),1200)(self) # Call the question's function
+ except Exception, inst:
+ self.addExceptionMessage(q, inst, traceback)
+ self.addErrorHints(exceptionMap, inst, q[1])
+ except:
+ self.fail('FAIL: Terminated with a string exception.')
+ finally:
+ if self.mute: util.unmutePrint()
+
+ if self.points[q] >= self.maxes[q]:
+ completedQuestions.add(q)
+
+ print '\n### Question %s: %d/%d ###\n' % (q, self.points[q], self.maxes[q])
+
+
+ print '\nFinished at %d:%02d:%02d' % time.localtime()[3:6]
+ print "\nProvisional grades\n=================="
+
+ for q in self.questions:
+ print 'Question %s: %d/%d' % (q, self.points[q], self.maxes[q])
+ print '------------------'
+ print 'Total: %d/%d' % (self.points.totalCount(), sum(self.maxes.values()))
+ if bonusPic and self.points.totalCount() == 25:
+ print """
+
+ ALL HAIL GRANDPAC.
+ LONG LIVE THE GHOSTBUSTING KING.
+
+ --- ---- ---
+ | \ / + \ / |
+ | + \--/ \--/ + |
+ | + + |
+ | + + + |
+ @@@@@@@@@@@@@@@@@@@@@@@@@@
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ \ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ \ / @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ V \ @@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ \ / @@@@@@@@@@@@@@@@@@@@@@@@@@
+ V @@@@@@@@@@@@@@@@@@@@@@@@
+ @@@@@@@@@@@@@@@@@@@@@@
+ /\ @@@@@@@@@@@@@@@@@@@@@@
+ / \ @@@@@@@@@@@@@@@@@@@@@@@@@
+ /\ / @@@@@@@@@@@@@@@@@@@@@@@@@@@
+ / \ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ / @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+ @@@@@@@@@@@@@@@@@@@@@@@@@@
+ @@@@@@@@@@@@@@@@@@
+
+"""
+ print """
+Your grades are NOT yet registered. To register your grades, make sure
+to follow your instructor's guidelines to receive credit on your project.
+"""
+
+ if self.edxOutput:
+ self.produceOutput()
+
+ def addExceptionMessage(self, q, inst, traceback):
+ """
+ Method to format the exception message; this is more complicated because
+ we need to cgi.escape the traceback but wrap the exception in a <pre> tag.
+ """
+ self.fail('FAIL: Exception raised: %s' % inst)
+ self.addMessage('')
+ for line in traceback.format_exc().split('\n'):
+ self.addMessage(line)
+
+ def addErrorHints(self, exceptionMap, errorInstance, questionNum):
+ typeOf = str(type(errorInstance))
+ questionName = 'q' + questionNum
+ errorHint = ''
+
+ # question specific error hints
+ if exceptionMap.get(questionName):
+ questionMap = exceptionMap.get(questionName)
+ if (questionMap.get(typeOf)):
+ errorHint = questionMap.get(typeOf)
+ # fall back to general error messages if a question specific
+ # one does not exist
+ if (exceptionMap.get(typeOf)):
+ errorHint = exceptionMap.get(typeOf)
+
+ # don't include the HTML if we have no error hint
+ if not errorHint:
+ return ''
+
+ for line in errorHint.split('\n'):
+ self.addMessage(line)
+
+ def produceOutput(self):
+ edxOutput = open('edx_response.html', 'w')
+ edxOutput.write("<div>")
+
+ # first sum
+ total_possible = sum(self.maxes.values())
+ total_score = sum(self.points.values())
+ checkOrX = '<span class="incorrect"/>'
+ if (total_score >= total_possible):
+ checkOrX = '<span class="correct"/>'
+ header = """
+ <h3>
+ Total score ({total_score} / {total_possible})
+ </h3>
+ """.format(total_score = total_score,
+ total_possible = total_possible,
+ checkOrX = checkOrX
+ )
+ edxOutput.write(header)
+
+ for q in self.questions:
+ if len(q) == 2:
+ name = q[1]
+ else:
+ name = q
+ checkOrX = '<span class="incorrect"/>'
+ if (self.points[q] == self.maxes[q]):
+ checkOrX = '<span class="correct"/>'
+ #messages = '\n<br/>\n'.join(self.messages[q])
+ messages = "<pre>%s</pre>" % '\n'.join(self.messages[q])
+ output = """
+ <div class="test">
+ <section>
+ <div class="shortform">
+ Question {q} ({points}/{max}) {checkOrX}
+ </div>
+ <div class="longform">
+ {messages}
+ </div>
+ </section>
+ </div>
+ """.format(q = name,
+ max = self.maxes[q],
+ messages = messages,
+ checkOrX = checkOrX,
+ points = self.points[q]
+ )
+ # print "*** output for Question %s " % q[1]
+ # print output
+ edxOutput.write(output)
+ edxOutput.write("</div>")
+ edxOutput.close()
+ edxOutput = open('edx_grade', 'w')
+ edxOutput.write(str(self.points.totalCount()))
+ edxOutput.close()
+
+ def fail(self, message, raw=False):
+ "Sets sanity check bit to false and outputs a message"
+ self.sane = False
+ self.assignZeroCredit()
+ self.addMessage(message, raw)
+
+ def assignZeroCredit(self):
+ self.points[self.currentQuestion] = 0
+
+ def addPoints(self, amt):
+ self.points[self.currentQuestion] += amt
+
+ def deductPoints(self, amt):
+ self.points[self.currentQuestion] -= amt
+
+ def assignFullCredit(self, message="", raw=False):
+ self.points[self.currentQuestion] = self.maxes[self.currentQuestion]
+ if message != "":
+ self.addMessage(message, raw)
+
+ def addMessage(self, message, raw=False):
+ if not raw:
+ # We assume raw messages, formatted for HTML, are printed separately
+ if self.mute: util.unmutePrint()
+ print '*** ' + message
+ if self.mute: util.mutePrint()
+ message = cgi.escape(message)
+ self.messages[self.currentQuestion].append(message)
+
+ def addMessageToEmail(self, message):
+ print "WARNING**** addMessageToEmail is deprecated %s" % message
+ for line in message.split('\n'):
+ pass
+ #print '%%% ' + line + ' %%%'
+ #self.messages[self.currentQuestion].append(line)
+
+
+
+
+
+class Counter(dict):
+ """
+ Dict with default 0
+ """
+ def __getitem__(self, idx):
+ try:
+ return dict.__getitem__(self, idx)
+ except KeyError:
+ return 0
+
+ def totalCount(self):
+ """
+ Returns the sum of counts for all keys.
+ """
+ return sum(self.values())
+
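
grading.py above is driven by autograder.py's evaluate(): it builds a Grades object from (question, max points) pairs, then grade() looks up one callable per question on the module it is given. A compressed sketch of that wiring (the project name and point value are placeholders; evaluate() normally installs the question functions via setattr):

    import sys
    import grading

    def q1(grades):                       # stand-in for a generated question function
        grades.addPoints(4)

    grades = grading.Grades('multiagent', [('q1', 4)])
    grades.grade(sys.modules[__name__])
    print(grades.points.totalCount())     # provisional total, as reported above
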
diff --git a/graphicsDisplay.py b/graphicsDisplay.py
new file mode 100644
index 0000000..1bfe1b3
--- /dev/null
+++ b/graphicsDisplay.py
@@ -0,0 +1,679 @@
+# graphicsDisplay.py
+# ------------------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+from graphicsUtils import *
+import math, time
+from game import Directions
+
+###########################
+# GRAPHICS DISPLAY CODE #
+###########################
+
+# Most code by Dan Klein and John Denero written or rewritten for cs188, UC Berkeley.
+# Some code from a Pacman implementation by LiveWires, and used / modified with permission.
+
+DEFAULT_GRID_SIZE = 30.0
+INFO_PANE_HEIGHT = 35
+BACKGROUND_COLOR = formatColor(0,0,0)
+WALL_COLOR = formatColor(0.0/255.0, 51.0/255.0, 255.0/255.0)
+INFO_PANE_COLOR = formatColor(.4,.4,0)
+SCORE_COLOR = formatColor(.9, .9, .9)
+PACMAN_OUTLINE_WIDTH = 2
+PACMAN_CAPTURE_OUTLINE_WIDTH = 4
+
+GHOST_COLORS = []
+GHOST_COLORS.append(formatColor(.9,0,0)) # Red
+GHOST_COLORS.append(formatColor(0,.3,.9)) # Blue
+GHOST_COLORS.append(formatColor(.98,.41,.07)) # Orange
+GHOST_COLORS.append(formatColor(.1,.75,.7)) # Green
+GHOST_COLORS.append(formatColor(1.0,0.6,0.0)) # Yellow
+GHOST_COLORS.append(formatColor(.4,0.13,0.91)) # Purple
+
+TEAM_COLORS = GHOST_COLORS[:2]
+
+GHOST_SHAPE = [
+ ( 0, 0.3 ),
+ ( 0.25, 0.75 ),
+ ( 0.5, 0.3 ),
+ ( 0.75, 0.75 ),
+ ( 0.75, -0.5 ),
+ ( 0.5, -0.75 ),
+ (-0.5, -0.75 ),
+ (-0.75, -0.5 ),
+ (-0.75, 0.75 ),
+ (-0.5, 0.3 ),
+ (-0.25, 0.75 )
+ ]
+GHOST_SIZE = 0.65
+SCARED_COLOR = formatColor(1,1,1)
+
+GHOST_VEC_COLORS = map(colorToVector, GHOST_COLORS)
+
+PACMAN_COLOR = formatColor(255.0/255.0,255.0/255.0,61.0/255)
+PACMAN_SCALE = 0.5
+#pacman_speed = 0.25
+
+# Food
+FOOD_COLOR = formatColor(1,1,1)
+FOOD_SIZE = 0.1
+
+# Laser
+LASER_COLOR = formatColor(1,0,0)
+LASER_SIZE = 0.02
+
+# Capsule graphics
+CAPSULE_COLOR = formatColor(1,1,1)
+CAPSULE_SIZE = 0.25
+
+# Drawing walls
+WALL_RADIUS = 0.15
+
+class InfoPane:
+ def __init__(self, layout, gridSize):
+ self.gridSize = gridSize
+ self.width = (layout.width) * gridSize
+ self.base = (layout.height + 1) * gridSize
+ self.height = INFO_PANE_HEIGHT
+ self.fontSize = 24
+ self.textColor = PACMAN_COLOR
+ self.drawPane()
+
+ def toScreen(self, pos, y = None):
+ """
+        Translates a point relative to the bottom left of the info pane.
+ """
+ if y == None:
+ x,y = pos
+ else:
+ x = pos
+
+ x = self.gridSize + x # Margin
+ y = self.base + y
+ return x,y
+
+ def drawPane(self):
+ self.scoreText = text( self.toScreen(0, 0 ), self.textColor, "SCORE: 0", "Times", self.fontSize, "bold")
+
+ def initializeGhostDistances(self, distances):
+ self.ghostDistanceText = []
+
+ size = 20
+ if self.width < 240:
+ size = 12
+ if self.width < 160:
+ size = 10
+
+ for i, d in enumerate(distances):
+ t = text( self.toScreen(self.width/2 + self.width/8 * i, 0), GHOST_COLORS[i+1], d, "Times", size, "bold")
+ self.ghostDistanceText.append(t)
+
+ def updateScore(self, score):
+ changeText(self.scoreText, "SCORE: % 4d" % score)
+
+ def setTeam(self, isBlue):
+        teamLabel = "RED TEAM"
+        if isBlue: teamLabel = "BLUE TEAM"
+        self.teamText = text( self.toScreen(300, 0), self.textColor, teamLabel, "Times", self.fontSize, "bold")
+
+ def updateGhostDistances(self, distances):
+ if len(distances) == 0: return
+ if 'ghostDistanceText' not in dir(self): self.initializeGhostDistances(distances)
+ else:
+ for i, d in enumerate(distances):
+ changeText(self.ghostDistanceText[i], d)
+
+ def drawGhost(self):
+ pass
+
+ def drawPacman(self):
+ pass
+
+ def drawWarning(self):
+ pass
+
+ def clearIcon(self):
+ pass
+
+ def updateMessage(self, message):
+ pass
+
+ def clearMessage(self):
+ pass
+
+
+class PacmanGraphics:
+ def __init__(self, zoom=1.0, frameTime=0.0, capture=False):
+ self.have_window = 0
+ self.currentGhostImages = {}
+ self.pacmanImage = None
+ self.zoom = zoom
+ self.gridSize = DEFAULT_GRID_SIZE * zoom
+ self.capture = capture
+ self.frameTime = frameTime
+
+ def checkNullDisplay(self):
+ return False
+
+ def initialize(self, state, isBlue = False):
+ self.isBlue = isBlue
+ self.startGraphics(state)
+
+ # self.drawDistributions(state)
+ self.distributionImages = None # Initialized lazily
+ self.drawStaticObjects(state)
+ self.drawAgentObjects(state)
+
+ # Information
+ self.previousState = state
+
+ def startGraphics(self, state):
+ self.layout = state.layout
+ layout = self.layout
+ self.width = layout.width
+ self.height = layout.height
+ self.make_window(self.width, self.height)
+ self.infoPane = InfoPane(layout, self.gridSize)
+ self.currentState = layout
+
+ def drawDistributions(self, state):
+ walls = state.layout.walls
+ dist = []
+ for x in range(walls.width):
+ distx = []
+ dist.append(distx)
+ for y in range(walls.height):
+ ( screen_x, screen_y ) = self.to_screen( (x, y) )
+ block = square( (screen_x, screen_y),
+ 0.5 * self.gridSize,
+ color = BACKGROUND_COLOR,
+ filled = 1, behind=2)
+ distx.append(block)
+ self.distributionImages = dist
+
+ def drawStaticObjects(self, state):
+ layout = self.layout
+ self.drawWalls(layout.walls)
+ self.food = self.drawFood(layout.food)
+ self.capsules = self.drawCapsules(layout.capsules)
+ refresh()
+
+ def drawAgentObjects(self, state):
+ self.agentImages = [] # (agentState, image)
+ for index, agent in enumerate(state.agentStates):
+ if agent.isPacman:
+ image = self.drawPacman(agent, index)
+ self.agentImages.append( (agent, image) )
+ else:
+ image = self.drawGhost(agent, index)
+ self.agentImages.append( (agent, image) )
+ refresh()
+
+ def swapImages(self, agentIndex, newState):
+ """
+        Changes an image from a ghost to a pacman or vice versa (for capture)
+ """
+ prevState, prevImage = self.agentImages[agentIndex]
+ for item in prevImage: remove_from_screen(item)
+ if newState.isPacman:
+ image = self.drawPacman(newState, agentIndex)
+ self.agentImages[agentIndex] = (newState, image )
+ else:
+ image = self.drawGhost(newState, agentIndex)
+ self.agentImages[agentIndex] = (newState, image )
+ refresh()
+
+ def update(self, newState):
+ agentIndex = newState._agentMoved
+ agentState = newState.agentStates[agentIndex]
+
+ if self.agentImages[agentIndex][0].isPacman != agentState.isPacman: self.swapImages(agentIndex, agentState)
+ prevState, prevImage = self.agentImages[agentIndex]
+ if agentState.isPacman:
+ self.animatePacman(agentState, prevState, prevImage)
+ else:
+ self.moveGhost(agentState, agentIndex, prevState, prevImage)
+ self.agentImages[agentIndex] = (agentState, prevImage)
+
+ if newState._foodEaten != None:
+ self.removeFood(newState._foodEaten, self.food)
+ if newState._capsuleEaten != None:
+ self.removeCapsule(newState._capsuleEaten, self.capsules)
+ self.infoPane.updateScore(newState.score)
+ if 'ghostDistances' in dir(newState):
+ self.infoPane.updateGhostDistances(newState.ghostDistances)
+
+ def make_window(self, width, height):
+ grid_width = (width-1) * self.gridSize
+ grid_height = (height-1) * self.gridSize
+ screen_width = 2*self.gridSize + grid_width
+ screen_height = 2*self.gridSize + grid_height + INFO_PANE_HEIGHT
+
+ begin_graphics(screen_width,
+ screen_height,
+ BACKGROUND_COLOR,
+ "CS188 Pacman")
+
+ def drawPacman(self, pacman, index):
+ position = self.getPosition(pacman)
+ screen_point = self.to_screen(position)
+ endpoints = self.getEndpoints(self.getDirection(pacman))
+
+ width = PACMAN_OUTLINE_WIDTH
+ outlineColor = PACMAN_COLOR
+ fillColor = PACMAN_COLOR
+
+ if self.capture:
+ outlineColor = TEAM_COLORS[index % 2]
+ fillColor = GHOST_COLORS[index]
+ width = PACMAN_CAPTURE_OUTLINE_WIDTH
+
+ return [circle(screen_point, PACMAN_SCALE * self.gridSize,
+ fillColor = fillColor, outlineColor = outlineColor,
+ endpoints = endpoints,
+ width = width)]
+
+ def getEndpoints(self, direction, position=(0,0)):
+ x, y = position
+ pos = x - int(x) + y - int(y)
+ width = 30 + 80 * math.sin(math.pi* pos)
+
+ delta = width / 2
+ if (direction == 'West'):
+ endpoints = (180+delta, 180-delta)
+ elif (direction == 'North'):
+ endpoints = (90+delta, 90-delta)
+ elif (direction == 'South'):
+ endpoints = (270+delta, 270-delta)
+ else:
+ endpoints = (0+delta, 0-delta)
+ return endpoints
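+    # Worked example (illustrative, not part of the original code): halfway
+    # between grid points the fractional position sum is 0.5, so
+    # width = 30 + 80*sin(pi*0.5) = 110 and an East-facing Pacman gets arc
+    # endpoints (55, -55), a wide-open mouth; at integer positions width = 30,
+    # giving endpoints (15, -15), an almost-closed mouth.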
+
+ def movePacman(self, position, direction, image):
+ screenPosition = self.to_screen(position)
+ endpoints = self.getEndpoints( direction, position )
+ r = PACMAN_SCALE * self.gridSize
+ moveCircle(image[0], screenPosition, r, endpoints)
+ refresh()
+
+ def animatePacman(self, pacman, prevPacman, image):
+ if self.frameTime < 0:
+ print 'Press any key to step forward, "q" to play'
+ keys = wait_for_keys()
+ if 'q' in keys:
+ self.frameTime = 0.1
+ if self.frameTime > 0.01 or self.frameTime < 0:
+ start = time.time()
+ fx, fy = self.getPosition(prevPacman)
+ px, py = self.getPosition(pacman)
+ frames = 4.0
+ for i in range(1,int(frames) + 1):
+ pos = px*i/frames + fx*(frames-i)/frames, py*i/frames + fy*(frames-i)/frames
+ self.movePacman(pos, self.getDirection(pacman), image)
+ refresh()
+ sleep(abs(self.frameTime) / frames)
+ else:
+ self.movePacman(self.getPosition(pacman), self.getDirection(pacman), image)
+ refresh()
+
+ def getGhostColor(self, ghost, ghostIndex):
+ if ghost.scaredTimer > 0:
+ return SCARED_COLOR
+ else:
+ return GHOST_COLORS[ghostIndex]
+
+ def drawGhost(self, ghost, agentIndex):
+ pos = self.getPosition(ghost)
+ dir = self.getDirection(ghost)
+ (screen_x, screen_y) = (self.to_screen(pos) )
+ coords = []
+ for (x, y) in GHOST_SHAPE:
+ coords.append((x*self.gridSize*GHOST_SIZE + screen_x, y*self.gridSize*GHOST_SIZE + screen_y))
+
+ colour = self.getGhostColor(ghost, agentIndex)
+ body = polygon(coords, colour, filled = 1)
+ WHITE = formatColor(1.0, 1.0, 1.0)
+ BLACK = formatColor(0.0, 0.0, 0.0)
+
+ dx = 0
+ dy = 0
+ if dir == 'North':
+ dy = -0.2
+ if dir == 'South':
+ dy = 0.2
+ if dir == 'East':
+ dx = 0.2
+ if dir == 'West':
+ dx = -0.2
+ leftEye = circle((screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2, WHITE, WHITE)
+ rightEye = circle((screen_x+self.gridSize*GHOST_SIZE*(0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2, WHITE, WHITE)
+ leftPupil = circle((screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08, BLACK, BLACK)
+ rightPupil = circle((screen_x+self.gridSize*GHOST_SIZE*(0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08, BLACK, BLACK)
+ ghostImageParts = []
+ ghostImageParts.append(body)
+ ghostImageParts.append(leftEye)
+ ghostImageParts.append(rightEye)
+ ghostImageParts.append(leftPupil)
+ ghostImageParts.append(rightPupil)
+
+ return ghostImageParts
+
+ def moveEyes(self, pos, dir, eyes):
+ (screen_x, screen_y) = (self.to_screen(pos) )
+ dx = 0
+ dy = 0
+ if dir == 'North':
+ dy = -0.2
+ if dir == 'South':
+ dy = 0.2
+ if dir == 'East':
+ dx = 0.2
+ if dir == 'West':
+ dx = -0.2
+ moveCircle(eyes[0],(screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2)
+ moveCircle(eyes[1],(screen_x+self.gridSize*GHOST_SIZE*(0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2)
+ moveCircle(eyes[2],(screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08)
+ moveCircle(eyes[3],(screen_x+self.gridSize*GHOST_SIZE*(0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08)
+
+ def moveGhost(self, ghost, ghostIndex, prevGhost, ghostImageParts):
+ old_x, old_y = self.to_screen(self.getPosition(prevGhost))
+ new_x, new_y = self.to_screen(self.getPosition(ghost))
+ delta = new_x - old_x, new_y - old_y
+
+ for ghostImagePart in ghostImageParts:
+ move_by(ghostImagePart, delta)
+ refresh()
+
+ if ghost.scaredTimer > 0:
+ color = SCARED_COLOR
+ else:
+ color = GHOST_COLORS[ghostIndex]
+ edit(ghostImageParts[0], ('fill', color), ('outline', color))
+ self.moveEyes(self.getPosition(ghost), self.getDirection(ghost), ghostImageParts[-4:])
+ refresh()
+
+ def getPosition(self, agentState):
+ if agentState.configuration == None: return (-1000, -1000)
+ return agentState.getPosition()
+
+ def getDirection(self, agentState):
+ if agentState.configuration == None: return Directions.STOP
+ return agentState.configuration.getDirection()
+
+ def finish(self):
+ end_graphics()
+
+ def to_screen(self, point):
+ ( x, y ) = point
+ #y = self.height - y
+ x = (x + 1)*self.gridSize
+ y = (self.height - y)*self.gridSize
+ return ( x, y )
+
+ # Fixes some TK issue with off-center circles
+ def to_screen2(self, point):
+ ( x, y ) = point
+ #y = self.height - y
+ x = (x + 1)*self.gridSize
+ y = (self.height - y)*self.gridSize
+ return ( x, y )
+
+ def drawWalls(self, wallMatrix):
+ wallColor = WALL_COLOR
+ for xNum, x in enumerate(wallMatrix):
+ if self.capture and (xNum * 2) < wallMatrix.width: wallColor = TEAM_COLORS[0]
+ if self.capture and (xNum * 2) >= wallMatrix.width: wallColor = TEAM_COLORS[1]
+
+ for yNum, cell in enumerate(x):
+ if cell: # There's a wall here
+ pos = (xNum, yNum)
+ screen = self.to_screen(pos)
+ screen2 = self.to_screen2(pos)
+
+ # draw each quadrant of the square based on adjacent walls
+ wIsWall = self.isWall(xNum-1, yNum, wallMatrix)
+ eIsWall = self.isWall(xNum+1, yNum, wallMatrix)
+ nIsWall = self.isWall(xNum, yNum+1, wallMatrix)
+ sIsWall = self.isWall(xNum, yNum-1, wallMatrix)
+ nwIsWall = self.isWall(xNum-1, yNum+1, wallMatrix)
+ swIsWall = self.isWall(xNum-1, yNum-1, wallMatrix)
+ neIsWall = self.isWall(xNum+1, yNum+1, wallMatrix)
+ seIsWall = self.isWall(xNum+1, yNum-1, wallMatrix)
+
+ # NE quadrant
+ if (not nIsWall) and (not eIsWall):
+ # inner circle
+ circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (0,91), 'arc')
+ if (nIsWall) and (not eIsWall):
+ # vertical line
+ line(add(screen, (self.gridSize*WALL_RADIUS, 0)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-0.5)-1)), wallColor)
+ if (not nIsWall) and (eIsWall):
+ # horizontal line
+ line(add(screen, (0, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
+ if (nIsWall) and (eIsWall) and (not neIsWall):
+ # outer circle
+ circle(add(screen2, (self.gridSize*2*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (180,271), 'arc')
+ line(add(screen, (self.gridSize*2*WALL_RADIUS-1, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
+ line(add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS+1)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-0.5))), wallColor)
+
+ # NW quadrant
+ if (not nIsWall) and (not wIsWall):
+ # inner circle
+ circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (90,181), 'arc')
+ if (nIsWall) and (not wIsWall):
+ # vertical line
+ line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, 0)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-0.5)-1)), wallColor)
+ if (not nIsWall) and (wIsWall):
+ # horizontal line
+ line(add(screen, (0, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5)-1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
+ if (nIsWall) and (wIsWall) and (not nwIsWall):
+ # outer circle
+ circle(add(screen2, (self.gridSize*(-2)*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (270,361), 'arc')
+ line(add(screen, (self.gridSize*(-2)*WALL_RADIUS+1, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5), self.gridSize*(-1)*WALL_RADIUS)), wallColor)
+ line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS+1)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-0.5))), wallColor)
+
+ # SE quadrant
+ if (not sIsWall) and (not eIsWall):
+ # inner circle
+ circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (270,361), 'arc')
+ if (sIsWall) and (not eIsWall):
+ # vertical line
+ line(add(screen, (self.gridSize*WALL_RADIUS, 0)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(0.5)+1)), wallColor)
+ if (not sIsWall) and (eIsWall):
+ # horizontal line
+ line(add(screen, (0, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(1)*WALL_RADIUS)), wallColor)
+ if (sIsWall) and (eIsWall) and (not seIsWall):
+ # outer circle
+ circle(add(screen2, (self.gridSize*2*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (90,181), 'arc')
+ line(add(screen, (self.gridSize*2*WALL_RADIUS-1, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5, self.gridSize*(1)*WALL_RADIUS)), wallColor)
+ line(add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS-1)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(0.5))), wallColor)
+
+ # SW quadrant
+ if (not sIsWall) and (not wIsWall):
+ # inner circle
+ circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (180,271), 'arc')
+ if (sIsWall) and (not wIsWall):
+ # vertical line
+ line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, 0)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(0.5)+1)), wallColor)
+ if (not sIsWall) and (wIsWall):
+ # horizontal line
+ line(add(screen, (0, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5)-1, self.gridSize*(1)*WALL_RADIUS)), wallColor)
+ if (sIsWall) and (wIsWall) and (not swIsWall):
+ # outer circle
+ circle(add(screen2, (self.gridSize*(-2)*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (0,91), 'arc')
+ line(add(screen, (self.gridSize*(-2)*WALL_RADIUS+1, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5), self.gridSize*(1)*WALL_RADIUS)), wallColor)
+ line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS-1)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(0.5))), wallColor)
+
+ def isWall(self, x, y, walls):
+ if x < 0 or y < 0:
+ return False
+ if x >= walls.width or y >= walls.height:
+ return False
+ return walls[x][y]
+
+ def drawFood(self, foodMatrix ):
+ foodImages = []
+ color = FOOD_COLOR
+ for xNum, x in enumerate(foodMatrix):
+ if self.capture and (xNum * 2) <= foodMatrix.width: color = TEAM_COLORS[0]
+ if self.capture and (xNum * 2) > foodMatrix.width: color = TEAM_COLORS[1]
+ imageRow = []
+ foodImages.append(imageRow)
+ for yNum, cell in enumerate(x):
+ if cell: # There's food here
+ screen = self.to_screen((xNum, yNum ))
+ dot = circle( screen,
+ FOOD_SIZE * self.gridSize,
+ outlineColor = color, fillColor = color,
+ width = 1)
+ imageRow.append(dot)
+ else:
+ imageRow.append(None)
+ return foodImages
+
+ def drawCapsules(self, capsules ):
+ capsuleImages = {}
+ for capsule in capsules:
+ ( screen_x, screen_y ) = self.to_screen(capsule)
+ dot = circle( (screen_x, screen_y),
+ CAPSULE_SIZE * self.gridSize,
+ outlineColor = CAPSULE_COLOR,
+ fillColor = CAPSULE_COLOR,
+ width = 1)
+ capsuleImages[capsule] = dot
+ return capsuleImages
+
+ def removeFood(self, cell, foodImages ):
+ x, y = cell
+ remove_from_screen(foodImages[x][y])
+
+ def removeCapsule(self, cell, capsuleImages ):
+ x, y = cell
+ remove_from_screen(capsuleImages[(x, y)])
+
+ def drawExpandedCells(self, cells):
+ """
+ Draws an overlay of expanded grid positions for search agents
+ """
+ n = float(len(cells))
+ baseColor = [1.0, 0.0, 0.0]
+ self.clearExpandedCells()
+ self.expandedCells = []
+ for k, cell in enumerate(cells):
+ screenPos = self.to_screen( cell)
+ cellColor = formatColor(*[(n-k) * c * .5 / n + .25 for c in baseColor])
+ block = square(screenPos,
+ 0.5 * self.gridSize,
+ color = cellColor,
+ filled = 1, behind=2)
+ self.expandedCells.append(block)
+ if self.frameTime < 0:
+ refresh()
+
+ def clearExpandedCells(self):
+ if 'expandedCells' in dir(self) and len(self.expandedCells) > 0:
+ for cell in self.expandedCells:
+ remove_from_screen(cell)
+
+
+ def updateDistributions(self, distributions):
+ "Draws an agent's belief distributions"
+ # copy all distributions so we don't change their state
+ distributions = map(lambda x: x.copy(), distributions)
+ if self.distributionImages == None:
+ self.drawDistributions(self.previousState)
+ for x in range(len(self.distributionImages)):
+ for y in range(len(self.distributionImages[0])):
+ image = self.distributionImages[x][y]
+ weights = [dist[ (x,y) ] for dist in distributions]
+
+ if sum(weights) != 0:
+ pass
+ # Fog of war
+ color = [0.0,0.0,0.0]
+ colors = GHOST_VEC_COLORS[1:] # With Pacman
+ if self.capture: colors = GHOST_VEC_COLORS
+ for weight, gcolor in zip(weights, colors):
+ color = [min(1.0, c + 0.95 * g * weight ** .3) for c,g in zip(color, gcolor)]
+ changeColor(image, formatColor(*color))
+ refresh()
+
+class FirstPersonPacmanGraphics(PacmanGraphics):
+ def __init__(self, zoom = 1.0, showGhosts = True, capture = False, frameTime=0):
+ PacmanGraphics.__init__(self, zoom, frameTime=frameTime)
+ self.showGhosts = showGhosts
+ self.capture = capture
+
+ def initialize(self, state, isBlue = False):
+
+ self.isBlue = isBlue
+ PacmanGraphics.startGraphics(self, state)
+ # Initialize distribution images
+ walls = state.layout.walls
+ dist = []
+ self.layout = state.layout
+
+ # Draw the rest
+ self.distributionImages = None # initialize lazily
+ self.drawStaticObjects(state)
+ self.drawAgentObjects(state)
+
+ # Information
+ self.previousState = state
+
+ def lookAhead(self, config, state):
+ if config.getDirection() == 'Stop':
+ return
+ else:
+ pass
+ # Draw relevant ghosts
+ allGhosts = state.getGhostStates()
+ visibleGhosts = state.getVisibleGhosts()
+ for i, ghost in enumerate(allGhosts):
+ if ghost in visibleGhosts:
+ self.drawGhost(ghost, i)
+ else:
+ self.currentGhostImages[i] = None
+
+ def getGhostColor(self, ghost, ghostIndex):
+ return GHOST_COLORS[ghostIndex]
+
+ def getPosition(self, ghostState):
+ if not self.showGhosts and not ghostState.isPacman and ghostState.getPosition()[1] > 1:
+ return (-1000, -1000)
+ else:
+ return PacmanGraphics.getPosition(self, ghostState)
+
+def add(x, y):
+ return (x[0] + y[0], x[1] + y[1])
+
+
+# Saving graphical output
+# -----------------------
+# Note: to make an animated gif from this PostScript output, try the command:
+# convert -delay 7 -loop 1 -compress lzw -layers optimize frame* out.gif
+# convert is part of ImageMagick (free software)
+
+SAVE_POSTSCRIPT = False
+POSTSCRIPT_OUTPUT_DIR = 'frames'
+FRAME_NUMBER = 0
+import os
+
+def saveFrame():
+    "Saves the current graphical output as a PostScript file"
+ global SAVE_POSTSCRIPT, FRAME_NUMBER, POSTSCRIPT_OUTPUT_DIR
+ if not SAVE_POSTSCRIPT: return
+ if not os.path.exists(POSTSCRIPT_OUTPUT_DIR): os.mkdir(POSTSCRIPT_OUTPUT_DIR)
+ name = os.path.join(POSTSCRIPT_OUTPUT_DIR, 'frame_%08d.ps' % FRAME_NUMBER)
+ FRAME_NUMBER += 1
+ writePostscript(name) # writes the current canvas
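+
+# Usage sketch (illustrative; saveFrame is not wired into the display loop by
+# default in this file): another module could enable the PostScript dump like
+# this, assuming it calls saveFrame() after each display update.
+#
+#     import graphicsDisplay
+#     graphicsDisplay.SAVE_POSTSCRIPT = True
+#     # ... run a game with the graphical display, calling saveFrame() each frame,
+#     # then build a gif from the frames/ directory with the convert command above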
diff --git a/graphicsUtils.py b/graphicsUtils.py
new file mode 100644
index 0000000..a1d8bdc
--- /dev/null
+++ b/graphicsUtils.py
@@ -0,0 +1,398 @@
+# graphicsUtils.py
+# ----------------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+import sys
+import math
+import random
+import string
+import time
+import types
+import Tkinter
+
+_Windows = sys.platform == 'win32' # True if on Win95/98/NT
+
+_root_window = None # The root window for graphics output
+_canvas = None # The canvas which holds graphics
+_canvas_xs = None # Size of canvas object
+_canvas_ys = None
+_canvas_x = None # Current position on canvas
+_canvas_y = None
+_canvas_col = None # Current colour (set to black below)
+_canvas_tsize = 12
+_canvas_tserifs = 0
+
+def formatColor(r, g, b):
+ return '#%02x%02x%02x' % (int(r * 255), int(g * 255), int(b * 255))
+
+def colorToVector(color):
+ return map(lambda x: int(x, 16) / 256.0, [color[1:3], color[3:5], color[5:7]])
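+# Example (illustrative): formatColor packs RGB floats in [0, 1] into a Tk hex
+# string, and colorToVector approximately inverts it.
+#     formatColor(0.9, 0.0, 0.0)   # -> '#e50000'
+#     colorToVector('#e50000')     # -> [0.89453125, 0.0, 0.0]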
+
+if _Windows:
+ _canvas_tfonts = ['times new roman', 'lucida console']
+else:
+ _canvas_tfonts = ['times', 'lucidasans-24']
+ pass # XXX need defaults here
+
+def sleep(secs):
+ global _root_window
+ if _root_window == None:
+ time.sleep(secs)
+ else:
+ _root_window.update_idletasks()
+ _root_window.after(int(1000 * secs), _root_window.quit)
+ _root_window.mainloop()
+
+def begin_graphics(width=640, height=480, color=formatColor(0, 0, 0), title=None):
+
+ global _root_window, _canvas, _canvas_x, _canvas_y, _canvas_xs, _canvas_ys, _bg_color
+
+ # Check for duplicate call
+ if _root_window is not None:
+ # Lose the window.
+ _root_window.destroy()
+
+ # Save the canvas size parameters
+ _canvas_xs, _canvas_ys = width - 1, height - 1
+ _canvas_x, _canvas_y = 0, _canvas_ys
+ _bg_color = color
+
+ # Create the root window
+ _root_window = Tkinter.Tk()
+ _root_window.protocol('WM_DELETE_WINDOW', _destroy_window)
+ _root_window.title(title or 'Graphics Window')
+ _root_window.resizable(0, 0)
+
+ # Create the canvas object
+ try:
+ _canvas = Tkinter.Canvas(_root_window, width=width, height=height)
+ _canvas.pack()
+ draw_background()
+ _canvas.update()
+ except:
+ _root_window = None
+ raise
+
+ # Bind to key-down and key-up events
+ _root_window.bind( "<KeyPress>", _keypress )
+ _root_window.bind( "<KeyRelease>", _keyrelease )
+ _root_window.bind( "<FocusIn>", _clear_keys )
+ _root_window.bind( "<FocusOut>", _clear_keys )
+ _root_window.bind( "<Button-1>", _leftclick )
+ _root_window.bind( "<Button-2>", _rightclick )
+ _root_window.bind( "<Button-3>", _rightclick )
+ _root_window.bind( "<Control-Button-1>", _ctrl_leftclick)
+ _clear_keys()
+
+_leftclick_loc = None
+_rightclick_loc = None
+_ctrl_leftclick_loc = None
+
+def _leftclick(event):
+ global _leftclick_loc
+ _leftclick_loc = (event.x, event.y)
+
+def _rightclick(event):
+ global _rightclick_loc
+ _rightclick_loc = (event.x, event.y)
+
+def _ctrl_leftclick(event):
+ global _ctrl_leftclick_loc
+ _ctrl_leftclick_loc = (event.x, event.y)
+
+def wait_for_click():
+ while True:
+ global _leftclick_loc
+ global _rightclick_loc
+ global _ctrl_leftclick_loc
+ if _leftclick_loc != None:
+ val = _leftclick_loc
+ _leftclick_loc = None
+ return val, 'left'
+ if _rightclick_loc != None:
+ val = _rightclick_loc
+ _rightclick_loc = None
+ return val, 'right'
+ if _ctrl_leftclick_loc != None:
+ val = _ctrl_leftclick_loc
+ _ctrl_leftclick_loc = None
+ return val, 'ctrl_left'
+ sleep(0.05)
+
+def draw_background():
+ corners = [(0,0), (0, _canvas_ys), (_canvas_xs, _canvas_ys), (_canvas_xs, 0)]
+ polygon(corners, _bg_color, fillColor=_bg_color, filled=True, smoothed=False)
+
+def _destroy_window(event=None):
+ sys.exit(0)
+# global _root_window
+# _root_window.destroy()
+# _root_window = None
+ #print "DESTROY"
+
+def end_graphics():
+ global _root_window, _canvas, _mouse_enabled
+ try:
+ try:
+ sleep(1)
+ if _root_window != None:
+ _root_window.destroy()
+ except SystemExit, e:
+ print 'Ending graphics raised an exception:', e
+ finally:
+ _root_window = None
+ _canvas = None
+ _mouse_enabled = 0
+ _clear_keys()
+
+def clear_screen(background=None):
+ global _canvas_x, _canvas_y
+ _canvas.delete('all')
+ draw_background()
+ _canvas_x, _canvas_y = 0, _canvas_ys
+
+def polygon(coords, outlineColor, fillColor=None, filled=1, smoothed=1, behind=0, width=1):
+ c = []
+ for coord in coords:
+ c.append(coord[0])
+ c.append(coord[1])
+ if fillColor == None: fillColor = outlineColor
+ if filled == 0: fillColor = ""
+ poly = _canvas.create_polygon(c, outline=outlineColor, fill=fillColor, smooth=smoothed, width=width)
+ if behind > 0:
+ _canvas.tag_lower(poly, behind) # Higher should be more visible
+ return poly
+
+def square(pos, r, color, filled=1, behind=0):
+ x, y = pos
+ coords = [(x - r, y - r), (x + r, y - r), (x + r, y + r), (x - r, y + r)]
+ return polygon(coords, color, color, filled, 0, behind=behind)
+
+def circle(pos, r, outlineColor, fillColor, endpoints=None, style='pieslice', width=2):
+ x, y = pos
+ x0, x1 = x - r - 1, x + r
+ y0, y1 = y - r - 1, y + r
+ if endpoints == None:
+ e = [0, 359]
+ else:
+ e = list(endpoints)
+ while e[0] > e[1]: e[1] = e[1] + 360
+
+ return _canvas.create_arc(x0, y0, x1, y1, outline=outlineColor, fill=fillColor,
+ extent=e[1] - e[0], start=e[0], style=style, width=width)
+
+def image(pos, file="../../blueghost.gif"):
+ x, y = pos
+ # img = PhotoImage(file=file)
+ return _canvas.create_image(x, y, image = Tkinter.PhotoImage(file=file), anchor = Tkinter.NW)
+
+
+def refresh():
+ _canvas.update_idletasks()
+
+def moveCircle(id, pos, r, endpoints=None):
+ global _canvas_x, _canvas_y
+
+ x, y = pos
+# x0, x1 = x - r, x + r + 1
+# y0, y1 = y - r, y + r + 1
+ x0, x1 = x - r - 1, x + r
+ y0, y1 = y - r - 1, y + r
+ if endpoints == None:
+ e = [0, 359]
+ else:
+ e = list(endpoints)
+ while e[0] > e[1]: e[1] = e[1] + 360
+
+ edit(id, ('start', e[0]), ('extent', e[1] - e[0]))
+ move_to(id, x0, y0)
+
+def edit(id, *args):
+ _canvas.itemconfigure(id, **dict(args))
+
+def text(pos, color, contents, font='Helvetica', size=12, style='normal', anchor="nw"):
+ global _canvas_x, _canvas_y
+ x, y = pos
+ font = (font, str(size), style)
+ return _canvas.create_text(x, y, fill=color, text=contents, font=font, anchor=anchor)
+
+def changeText(id, newText, font=None, size=12, style='normal'):
+ _canvas.itemconfigure(id, text=newText)
+ if font != None:
+ _canvas.itemconfigure(id, font=(font, '-%d' % size, style))
+
+def changeColor(id, newColor):
+ _canvas.itemconfigure(id, fill=newColor)
+
+def line(here, there, color=formatColor(0, 0, 0), width=2):
+ x0, y0 = here[0], here[1]
+ x1, y1 = there[0], there[1]
+ return _canvas.create_line(x0, y0, x1, y1, fill=color, width=width)
+
+##############################################################################
+### Keypress handling ########################################################
+##############################################################################
+
+# We bind to key-down and key-up events.
+
+_keysdown = {}
+_keyswaiting = {}
+# This holds an unprocessed key release. We delay key releases by up to
+# one call to keys_pressed() to get around a problem with auto-repeat.
+_got_release = None
+
+def _keypress(event):
+ global _got_release
+ #remap_arrows(event)
+ _keysdown[event.keysym] = 1
+ _keyswaiting[event.keysym] = 1
+# print event.char, event.keycode
+ _got_release = None
+
+def _keyrelease(event):
+ global _got_release
+ #remap_arrows(event)
+ try:
+ del _keysdown[event.keysym]
+ except:
+ pass
+ _got_release = 1
+
+def remap_arrows(event):
+ # TURN ARROW PRESSES INTO LETTERS (SHOULD BE IN KEYBOARD AGENT)
+ if event.char in ['a', 's', 'd', 'w']:
+ return
+ if event.keycode in [37, 101]: # LEFT ARROW (win / x)
+ event.char = 'a'
+ if event.keycode in [38, 99]: # UP ARROW
+ event.char = 'w'
+ if event.keycode in [39, 102]: # RIGHT ARROW
+ event.char = 'd'
+ if event.keycode in [40, 104]: # DOWN ARROW
+ event.char = 's'
+
+def _clear_keys(event=None):
+ global _keysdown, _got_release, _keyswaiting
+ _keysdown = {}
+ _keyswaiting = {}
+ _got_release = None
+
+def keys_pressed(d_o_e=Tkinter.tkinter.dooneevent,
+ d_w=Tkinter.tkinter.DONT_WAIT):
+ d_o_e(d_w)
+ if _got_release:
+ d_o_e(d_w)
+ return _keysdown.keys()
+
+def keys_waiting():
+ global _keyswaiting
+ keys = _keyswaiting.keys()
+ _keyswaiting = {}
+ return keys
+
+# Block for a list of keys...
+
+def wait_for_keys():
+ keys = []
+ while keys == []:
+ keys = keys_pressed()
+ sleep(0.05)
+ return keys
+
+def remove_from_screen(x,
+ d_o_e=Tkinter.tkinter.dooneevent,
+ d_w=Tkinter.tkinter.DONT_WAIT):
+ _canvas.delete(x)
+ d_o_e(d_w)
+
+def _adjust_coords(coord_list, x, y):
+ for i in range(0, len(coord_list), 2):
+ coord_list[i] = coord_list[i] + x
+ coord_list[i + 1] = coord_list[i + 1] + y
+ return coord_list
+
+def move_to(object, x, y=None,
+ d_o_e=Tkinter.tkinter.dooneevent,
+ d_w=Tkinter.tkinter.DONT_WAIT):
+ if y is None:
+ try: x, y = x
+        except: raise Exception, 'incomprehensible coordinates'
+
+ horiz = True
+ newCoords = []
+ current_x, current_y = _canvas.coords(object)[0:2] # first point
+ for coord in _canvas.coords(object):
+ if horiz:
+ inc = x - current_x
+ else:
+ inc = y - current_y
+ horiz = not horiz
+
+ newCoords.append(coord + inc)
+
+ _canvas.coords(object, *newCoords)
+ d_o_e(d_w)
+
+def move_by(object, x, y=None,
+ d_o_e=Tkinter.tkinter.dooneevent,
+ d_w=Tkinter.tkinter.DONT_WAIT, lift=False):
+ if y is None:
+ try: x, y = x
+ except: raise Exception, 'incomprehensible coordinates'
+
+ horiz = True
+ newCoords = []
+ for coord in _canvas.coords(object):
+ if horiz:
+ inc = x
+ else:
+ inc = y
+ horiz = not horiz
+
+ newCoords.append(coord + inc)
+
+ _canvas.coords(object, *newCoords)
+ d_o_e(d_w)
+ if lift:
+ _canvas.tag_raise(object)
+
+def writePostscript(filename):
+    "Writes the current canvas to a PostScript file."
+ psfile = file(filename, 'w')
+ psfile.write(_canvas.postscript(pageanchor='sw',
+ y='0.c',
+ x='0.c'))
+ psfile.close()
+
+ghost_shape = [
+ (0, - 0.5),
+ (0.25, - 0.75),
+ (0.5, - 0.5),
+ (0.75, - 0.75),
+ (0.75, 0.5),
+ (0.5, 0.75),
+ (- 0.5, 0.75),
+ (- 0.75, 0.5),
+ (- 0.75, - 0.75),
+ (- 0.5, - 0.5),
+ (- 0.25, - 0.75)
+ ]
+
+if __name__ == '__main__':
+ begin_graphics()
+ clear_screen()
+ ghost_shape = [(x * 10 + 20, y * 10 + 20) for x, y in ghost_shape]
+ g = polygon(ghost_shape, formatColor(1, 1, 1))
+ move_to(g, (50, 50))
+ circle((150, 150), 20, formatColor(0.7, 0.3, 0.0), endpoints=[15, - 15])
+ sleep(2)
diff --git a/keyboardAgents.py b/keyboardAgents.py
new file mode 100644
index 0000000..c7d9fcf
--- /dev/null
+++ b/keyboardAgents.py
@@ -0,0 +1,84 @@
+# keyboardAgents.py
+# -----------------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+from game import Agent
+from game import Directions
+import random
+
+class KeyboardAgent(Agent):
+ """
+ An agent controlled by the keyboard.
+ """
+ # NOTE: Arrow keys also work.
+ WEST_KEY = 'a'
+ EAST_KEY = 'd'
+ NORTH_KEY = 'w'
+ SOUTH_KEY = 's'
+ STOP_KEY = 'q'
+
+ def __init__( self, index = 0 ):
+
+ self.lastMove = Directions.STOP
+ self.index = index
+ self.keys = []
+
+ def getAction( self, state):
+ from graphicsUtils import keys_waiting
+ from graphicsUtils import keys_pressed
+ keys = keys_waiting() + keys_pressed()
+ if keys != []:
+ self.keys = keys
+
+ legal = state.getLegalActions(self.index)
+ move = self.getMove(legal)
+
+ if move == Directions.STOP:
+ # Try to move in the same direction as before
+ if self.lastMove in legal:
+ move = self.lastMove
+
+ if (self.STOP_KEY in self.keys) and Directions.STOP in legal: move = Directions.STOP
+
+ if move not in legal:
+ move = random.choice(legal)
+
+ self.lastMove = move
+ return move
+
+ def getMove(self, legal):
+ move = Directions.STOP
+ if (self.WEST_KEY in self.keys or 'Left' in self.keys) and Directions.WEST in legal: move = Directions.WEST
+ if (self.EAST_KEY in self.keys or 'Right' in self.keys) and Directions.EAST in legal: move = Directions.EAST
+ if (self.NORTH_KEY in self.keys or 'Up' in self.keys) and Directions.NORTH in legal: move = Directions.NORTH
+ if (self.SOUTH_KEY in self.keys or 'Down' in self.keys) and Directions.SOUTH in legal: move = Directions.SOUTH
+ return move
+
+class KeyboardAgent2(KeyboardAgent):
+ """
+ A second agent controlled by the keyboard.
+ """
+ # NOTE: Arrow keys also work.
+ WEST_KEY = 'j'
+ EAST_KEY = "l"
+ NORTH_KEY = 'i'
+ SOUTH_KEY = 'k'
+ STOP_KEY = 'u'
+
+ def getMove(self, legal):
+ move = Directions.STOP
+ if (self.WEST_KEY in self.keys) and Directions.WEST in legal: move = Directions.WEST
+ if (self.EAST_KEY in self.keys) and Directions.EAST in legal: move = Directions.EAST
+ if (self.NORTH_KEY in self.keys) and Directions.NORTH in legal: move = Directions.NORTH
+ if (self.SOUTH_KEY in self.keys) and Directions.SOUTH in legal: move = Directions.SOUTH
+ return move
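+
+# Usage sketch (illustrative): these agents are normally selected from the
+# pacman.py command line; assuming the standard project options, something like
+#
+#     python pacman.py -p KeyboardAgent -l mediumClassic
+#
+# plays a game controlled with the keys (or arrows) defined above.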
diff --git a/layout.py b/layout.py
new file mode 100644
index 0000000..c6b377d
--- /dev/null
+++ b/layout.py
@@ -0,0 +1,149 @@
+# layout.py
+# ---------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+from util import manhattanDistance
+from game import Grid
+import os
+import random
+
+VISIBILITY_MATRIX_CACHE = {}
+
+class Layout:
+ """
+ A Layout manages the static information about the game board.
+ """
+
+ def __init__(self, layoutText):
+ self.width = len(layoutText[0])
+ self.height= len(layoutText)
+ self.walls = Grid(self.width, self.height, False)
+ self.food = Grid(self.width, self.height, False)
+ self.capsules = []
+ self.agentPositions = []
+ self.numGhosts = 0
+ self.processLayoutText(layoutText)
+ self.layoutText = layoutText
+ self.totalFood = len(self.food.asList())
+ # self.initializeVisibilityMatrix()
+
+ def getNumGhosts(self):
+ return self.numGhosts
+
+ def initializeVisibilityMatrix(self):
+ global VISIBILITY_MATRIX_CACHE
+ if reduce(str.__add__, self.layoutText) not in VISIBILITY_MATRIX_CACHE:
+ from game import Directions
+ vecs = [(-0.5,0), (0.5,0),(0,-0.5),(0,0.5)]
+ dirs = [Directions.NORTH, Directions.SOUTH, Directions.WEST, Directions.EAST]
+ vis = Grid(self.width, self.height, {Directions.NORTH:set(), Directions.SOUTH:set(), Directions.EAST:set(), Directions.WEST:set(), Directions.STOP:set()})
+ for x in range(self.width):
+ for y in range(self.height):
+ if self.walls[x][y] == False:
+ for vec, direction in zip(vecs, dirs):
+ dx, dy = vec
+ nextx, nexty = x + dx, y + dy
+ while (nextx + nexty) != int(nextx) + int(nexty) or not self.walls[int(nextx)][int(nexty)] :
+ vis[x][y][direction].add((nextx, nexty))
+ nextx, nexty = x + dx, y + dy
+ self.visibility = vis
+ VISIBILITY_MATRIX_CACHE[reduce(str.__add__, self.layoutText)] = vis
+ else:
+ self.visibility = VISIBILITY_MATRIX_CACHE[reduce(str.__add__, self.layoutText)]
+
+ def isWall(self, pos):
+ x, col = pos
+ return self.walls[x][col]
+
+ def getRandomLegalPosition(self):
+ x = random.choice(range(self.width))
+ y = random.choice(range(self.height))
+ while self.isWall( (x, y) ):
+ x = random.choice(range(self.width))
+ y = random.choice(range(self.height))
+ return (x,y)
+
+ def getRandomCorner(self):
+ poses = [(1,1), (1, self.height - 2), (self.width - 2, 1), (self.width - 2, self.height - 2)]
+ return random.choice(poses)
+
+ def getFurthestCorner(self, pacPos):
+ poses = [(1,1), (1, self.height - 2), (self.width - 2, 1), (self.width - 2, self.height - 2)]
+ dist, pos = max([(manhattanDistance(p, pacPos), p) for p in poses])
+ return pos
+
+ def isVisibleFrom(self, ghostPos, pacPos, pacDirection):
+ row, col = [int(x) for x in pacPos]
+ return ghostPos in self.visibility[row][col][pacDirection]
+
+ def __str__(self):
+ return "\n".join(self.layoutText)
+
+ def deepCopy(self):
+ return Layout(self.layoutText[:])
+
+ def processLayoutText(self, layoutText):
+ """
+ Coordinates are flipped from the input format to the (x,y) convention here
+
+ The shape of the maze. Each character
+ represents a different type of object.
+ % - Wall
+ . - Food
+ o - Capsule
+ G - Ghost
+ P - Pacman
+ Other characters are ignored.
+ """
+ maxY = self.height - 1
+ for y in range(self.height):
+ for x in range(self.width):
+ layoutChar = layoutText[maxY - y][x]
+ self.processLayoutChar(x, y, layoutChar)
+ self.agentPositions.sort()
+ self.agentPositions = [ ( i == 0, pos) for i, pos in self.agentPositions]
+
+ def processLayoutChar(self, x, y, layoutChar):
+ if layoutChar == '%':
+ self.walls[x][y] = True
+ elif layoutChar == '.':
+ self.food[x][y] = True
+ elif layoutChar == 'o':
+ self.capsules.append((x, y))
+ elif layoutChar == 'P':
+ self.agentPositions.append( (0, (x, y) ) )
+ elif layoutChar in ['G']:
+ self.agentPositions.append( (1, (x, y) ) )
+ self.numGhosts += 1
+ elif layoutChar in ['1', '2', '3', '4']:
+ self.agentPositions.append( (int(layoutChar), (x,y)))
+ self.numGhosts += 1
+
+def getLayout(name, back = 2):
+ if name.endswith('.lay'):
+ layout = tryToLoad('layouts/' + name)
+ if layout == None: layout = tryToLoad(name)
+ else:
+ layout = tryToLoad('layouts/' + name + '.lay')
+ if layout == None: layout = tryToLoad(name + '.lay')
+ if layout == None and back >= 0:
+ curdir = os.path.abspath('.')
+ os.chdir('..')
+ layout = getLayout(name, back -1)
+ os.chdir(curdir)
+ return layout
+
+def tryToLoad(fullname):
+ if(not os.path.exists(fullname)): return None
+ f = open(fullname)
+ try: return Layout([line.strip() for line in f])
+ finally: f.close()
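+
+# Usage sketch (illustrative): loading a board by name, assuming this is run
+# from the project root so the layouts/ directory is found.
+#
+#     import layout
+#     lay = layout.getLayout('smallClassic')
+#     print lay.width, lay.height, lay.getNumGhosts()
+#     print lay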
diff --git a/layouts/capsuleClassic.lay b/layouts/capsuleClassic.lay
new file mode 100644
index 0000000..06a5c51
--- /dev/null
+++ b/layouts/capsuleClassic.lay
@@ -0,0 +1,7 @@
+%%%%%%%%%%%%%%%%%%%
+%G. G ....%
+%.% % %%%%%% %.%%.%
+%.%o% % o% %.o%.%
+%.%%%.% %%% %..%.%
+%..... P %..%G%
+%%%%%%%%%%%%%%%%%%%%
diff --git a/layouts/contestClassic.lay b/layouts/contestClassic.lay
new file mode 100644
index 0000000..84c8733
--- /dev/null
+++ b/layouts/contestClassic.lay
@@ -0,0 +1,9 @@
+%%%%%%%%%%%%%%%%%%%%
+%o...%........%...o%
+%.%%.%.%%..%%.%.%%.%
+%...... G GG%......%
+%.%.%%.%% %%%.%%.%.%
+%.%....% ooo%.%..%.%
+%.%.%%.% %% %.%.%%.%
+%o%......P....%....%
+%%%%%%%%%%%%%%%%%%%%
diff --git a/layouts/mediumClassic.lay b/layouts/mediumClassic.lay
new file mode 100644
index 0000000..33c5db8
--- /dev/null
+++ b/layouts/mediumClassic.lay
@@ -0,0 +1,11 @@
+%%%%%%%%%%%%%%%%%%%%
+%o...%........%....%
+%.%%.%.%%%%%%.%.%%.%
+%.%..............%.%
+%.%.%%.%% %%.%%.%.%
+%......%G G%......%
+%.%.%%.%%%%%%.%%.%.%
+%.%..............%.%
+%.%%.%.%%%%%%.%.%%.%
+%....%...P....%...o%
+%%%%%%%%%%%%%%%%%%%%
diff --git a/layouts/minimaxClassic.lay b/layouts/minimaxClassic.lay
new file mode 100644
index 0000000..a547397
--- /dev/null
+++ b/layouts/minimaxClassic.lay
@@ -0,0 +1,5 @@
+%%%%%%%%%
+%.P G%
+% %.%G%%%
+%G %%%
+%%%%%%%%%
diff --git a/layouts/openClassic.lay b/layouts/openClassic.lay
new file mode 100644
index 0000000..6760b42
--- /dev/null
+++ b/layouts/openClassic.lay
@@ -0,0 +1,9 @@
+%%%%%%%%%%%%%%%%%%%%%%%%%
+%.. P .... .... %
+%.. ... ... ... ... %
+%.. ... ... ... ... %
+%.. .... .... G %
+%.. ... ... ... ... %
+%.. ... ... ... ... %
+%.. .... .... o%
+%%%%%%%%%%%%%%%%%%%%%%%%%
diff --git a/layouts/originalClassic.lay b/layouts/originalClassic.lay
new file mode 100644
index 0000000..b2770c5
--- /dev/null
+++ b/layouts/originalClassic.lay
@@ -0,0 +1,27 @@
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%............%%............%
+%.%%%%.%%%%%.%%.%%%%%.%%%%.%
+%o%%%%.%%%%%.%%.%%%%%.%%%%o%
+%.%%%%.%%%%%.%%.%%%%%.%%%%.%
+%..........................%
+%.%%%%.%%.%%%%%%%%.%%.%%%%.%
+%.%%%%.%%.%%%%%%%%.%%.%%%%.%
+%......%%....%%....%%......%
+%%%%%%.%%%%% %% %%%%%.%%%%%%
+%%%%%%.%%%%% %% %%%%%.%%%%%%
+%%%%%%.% %.%%%%%%
+%%%%%%.% %%%% %%%% %.%%%%%%
+% . %G GG G% . %
+%%%%%%.% %%%%%%%%%% %.%%%%%%
+%%%%%%.% %.%%%%%%
+%%%%%%.% %%%%%%%%%% %.%%%%%%
+%............%%............%
+%.%%%%.%%%%%.%%.%%%%%.%%%%.%
+%.%%%%.%%%%%.%%.%%%%%.%%%%.%
+%o..%%....... .......%%..o%
+%%%.%%.%%.%%%%%%%%.%%.%%.%%%
+%%%.%%.%%.%%%%%%%%.%%.%%.%%%
+%......%%....%%....%%......%
+%.%%%%%%%%%%.%%.%%%%%%%%%%.%
+%.............P............%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%
diff --git a/layouts/smallClassic.lay b/layouts/smallClassic.lay
new file mode 100644
index 0000000..ce6c1d9
--- /dev/null
+++ b/layouts/smallClassic.lay
@@ -0,0 +1,7 @@
+%%%%%%%%%%%%%%%%%%%%
+%......%G G%......%
+%.%%...%% %%...%%.%
+%.%o.%........%.o%.%
+%.%%.%.%%%%%%.%.%%.%
+%........P.........%
+%%%%%%%%%%%%%%%%%%%%
diff --git a/layouts/testClassic.lay b/layouts/testClassic.lay
new file mode 100644
index 0000000..4b3ffca
--- /dev/null
+++ b/layouts/testClassic.lay
@@ -0,0 +1,10 @@
+%%%%%
+% . %
+%.G.%
+% . %
+%. .%
+% %
+% .%
+% %
+%P .%
+%%%%%
diff --git a/layouts/trappedClassic.lay b/layouts/trappedClassic.lay
new file mode 100644
index 0000000..289557f
--- /dev/null
+++ b/layouts/trappedClassic.lay
@@ -0,0 +1,5 @@
+%%%%%%%%
+% P G%
+%G%%%%%%
+%.... %
+%%%%%%%%
diff --git a/layouts/trickyClassic.lay b/layouts/trickyClassic.lay
new file mode 100644
index 0000000..ffa156c
--- /dev/null
+++ b/layouts/trickyClassic.lay
@@ -0,0 +1,13 @@
+%%%%%%%%%%%%%%%%%%%%
+%o...%........%...o%
+%.%%.%.%%..%%.%.%%.%
+%.%.....%..%.....%.%
+%.%.%%.%% %%.%%.%.%
+%...... GGGG%.%....%
+%.%....%%%%%%.%..%.%
+%.%....% oo%.%..%.%
+%.%....% %%%%.%..%.%
+%.%...........%..%.%
+%.%%.%.%%%%%%.%.%%.%
+%o...%...P....%...o%
+%%%%%%%%%%%%%%%%%%%%
diff --git a/multiAgents.py b/multiAgents.py
new file mode 100644
index 0000000..f6b8b8a
--- /dev/null
+++ b/multiAgents.py
@@ -0,0 +1,195 @@
+# multiAgents.py
+# --------------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+from util import manhattanDistance
+from game import Directions
+import random, util
+
+from game import Agent
+
+class ReflexAgent(Agent):
+ """
+ A reflex agent chooses an action at each choice point by examining
+ its alternatives via a state evaluation function.
+
+ The code below is provided as a guide. You are welcome to change
+ it in any way you see fit, so long as you don't touch our method
+ headers.
+ """
+
+
+ def getAction(self, gameState):
+ """
+ You do not need to change this method, but you're welcome to.
+
+ getAction chooses among the best options according to the evaluation function.
+
+ Just like in the previous project, getAction takes a GameState and returns
+ some Directions.X for some X in the set {North, South, West, East, Stop}
+ """
+ # Collect legal moves and successor states
+ legalMoves = gameState.getLegalActions()
+
+ # Choose one of the best actions
+ scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
+ bestScore = max(scores)
+ bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
+ chosenIndex = random.choice(bestIndices) # Pick randomly among the best
+
+ "Add more of your code here if you want to"
+
+ return legalMoves[chosenIndex]
+
+ def evaluationFunction(self, currentGameState, action):
+ """
+ Design a better evaluation function here.
+
+ The evaluation function takes in the current and proposed successor
+ GameStates (pacman.py) and returns a number, where higher numbers are better.
+
+ The code below extracts some useful information from the state, like the
+ remaining food (newFood) and Pacman position after moving (newPos).
+ newScaredTimes holds the number of moves that each ghost will remain
+ scared because of Pacman having eaten a power pellet.
+
+ Print out these variables to see what you're getting, then combine them
+ to create a masterful evaluation function.
+ """
+ # Useful information you can extract from a GameState (pacman.py)
+ successorGameState = currentGameState.generatePacmanSuccessor(action)
+ newPos = successorGameState.getPacmanPosition()
+ newFood = successorGameState.getFood()
+ newGhostStates = successorGameState.getGhostStates()
+ newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]
+
+ "*** YOUR CODE HERE ***"
+ closestFoodDistance = float('inf')
+ newFoodList = newFood.asList()
+        # Pacman eats a food in this successor, so there is no food-distance penalty
+ if len(currentGameState.getFood().asList()) > len(newFoodList):
+ closestFoodDistance = 0
+        # Pacman won't eat a food in this successor; use the distance to the closest food
+ else:
+ for food in newFoodList:
+ if manhattanDistance(newPos, food) < closestFoodDistance:
+ closestFoodDistance = manhattanDistance(newPos, food)
+
+        # Penalize ghosts that are too close to Pacman
+ ghostModifier = 0
+ for ghost in newGhostStates:
+            # Worst case: a ghost is on Pacman's square, 3 ** 2 = 9
+            # Best case: the ghost is (theoretically) infinitely far away, 3 ** -inf -> 0
+            # On larger maps this base may need to be increased
+ ghostModifier += 3 ** (2-manhattanDistance(newPos, ghost.getPosition()))
+
+        # The penalty is largest when Pacman is far from food and a ghost is about to kill him
+ penalty = closestFoodDistance + ghostModifier
+        # The successor with the highest score (lowest penalty) gets picked, i.e. the best case of this function
+ return -penalty
+
+def scoreEvaluationFunction(currentGameState):
+ """
+ This default evaluation function just returns the score of the state.
+ The score is the same one displayed in the Pacman GUI.
+
+ This evaluation function is meant for use with adversarial search agents
+ (not reflex agents).
+ """
+ return currentGameState.getScore()
+
+class MultiAgentSearchAgent(Agent):
+ """
+ This class provides some common elements to all of your
+ multi-agent searchers. Any methods defined here will be available
+ to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.
+
+ You *do not* need to make any changes here, but you can if you want to
+ add functionality to all your adversarial search agents. Please do not
+ remove anything, however.
+
+ Note: this is an abstract class: one that should not be instantiated. It's
+ only partially specified, and designed to be extended. Agent (game.py)
+ is another abstract class.
+ """
+
+ def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):
+ self.index = 0 # Pacman is always agent index 0
+ self.evaluationFunction = util.lookup(evalFn, globals())
+ self.depth = int(depth)
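+
+    # Usage sketch (illustrative): evalFn and depth are normally supplied
+    # through the pacman.py agent-options flag, assuming the standard project
+    # command line, e.g.
+    #     python pacman.py -p MinimaxAgent -a depth=3,evalFn=scoreEvaluationFunction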
+
+class MinimaxAgent(MultiAgentSearchAgent):
+ """
+ Your minimax agent (question 2)
+ """
+
+ def getAction(self, gameState):
+ """
+ Returns the minimax action from the current gameState using self.depth
+ and self.evaluationFunction.
+
+ Here are some method calls that might be useful when implementing minimax.
+
+ gameState.getLegalActions(agentIndex):
+ Returns a list of legal actions for an agent
+ agentIndex=0 means Pacman, ghosts are >= 1
+
+ gameState.generateSuccessor(agentIndex, action):
+ Returns the successor game state after an agent takes an action
+
+ gameState.getNumAgents():
+ Returns the total number of agents in the game
+ """
+ "*** YOUR CODE HERE ***"
+ util.raiseNotDefined()
+
+class AlphaBetaAgent(MultiAgentSearchAgent):
+ """
+ Your minimax agent with alpha-beta pruning (question 3)
+ """
+
+ def getAction(self, gameState):
+ """
+ Returns the minimax action using self.depth and self.evaluationFunction
+ """
+ "*** YOUR CODE HERE ***"
+ util.raiseNotDefined()
+
+class ExpectimaxAgent(MultiAgentSearchAgent):
+ """
+ Your expectimax agent (question 4)
+ """
+
+ def getAction(self, gameState):
+ """
+ Returns the expectimax action using self.depth and self.evaluationFunction
+
+ All ghosts should be modeled as choosing uniformly at random from their
+ legal moves.
+ """
+ "*** YOUR CODE HERE ***"
+ util.raiseNotDefined()
+
+def betterEvaluationFunction(currentGameState):
+ """
+ Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
+ evaluation function (question 5).
+
+ DESCRIPTION: <write something here so we know what you did>
+ """
+ "*** YOUR CODE HERE ***"
+ util.raiseNotDefined()
+
+# Abbreviation
+better = betterEvaluationFunction
+
diff --git a/multiagentTestClasses.py b/multiagentTestClasses.py
new file mode 100644
index 0000000..2bea859
--- /dev/null
+++ b/multiagentTestClasses.py
@@ -0,0 +1,529 @@
+# multiagentTestClasses.py
+# ------------------------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+# A minimax tree which interfaces like gameState
+# state.getNumAgents()
+# state.isWin()
+# state.isLose()
+# state.generateSuccessor(agentIndex, action)
+# state.getScore()
+# used by multiAgents.scoreEvaluationFunction, which is the default
+#
+import testClasses
+import json
+
+from collections import defaultdict
+from pprint import PrettyPrinter
+pp = PrettyPrinter()
+
+from game import Agent
+from pacman import GameState
+from ghostAgents import RandomGhost, DirectionalGhost
+import random, math, traceback, sys, os, time
+import layout, pacman
+import autograder
+# import grading
+
+VERBOSE = False
+
+class MultiagentTreeState(object):
+ def __init__(self, problem, state):
+ self.problem = problem
+ self.state = state
+
+ def generateSuccessor(self, agentIndex, action):
+ if VERBOSE:
+ print "generateSuccessor(%s, %s, %s) -> %s" % (self.state, agentIndex, action, self.problem.stateToSuccessorMap[self.state][action])
+ successor = self.problem.stateToSuccessorMap[self.state][action]
+ self.problem.generatedStates.add(successor)
+ return MultiagentTreeState(self.problem, successor)
+
+ def getScore(self):
+ if VERBOSE:
+ print "getScore(%s) -> %s" % (self.state, self.problem.evaluation[self.state])
+ if self.state not in self.problem.evaluation:
+ raise Exception('getScore() called on non-terminal state or before maximum depth achieved.')
+ return float(self.problem.evaluation[self.state])
+
+ def getLegalActions(self, agentIndex=0):
+ if VERBOSE:
+ print "getLegalActions(%s) -> %s" % (self.state, self.problem.stateToActions[self.state])
+ #if len(self.problem.stateToActions[self.state]) == 0:
+ # print "WARNING: getLegalActions called on leaf state %s" % (self.state,)
+ return list(self.problem.stateToActions[self.state])
+
+ def isWin(self):
+ if VERBOSE:
+ print "isWin(%s) -> %s" % (self.state, self.state in self.problem.winStates)
+ return self.state in self.problem.winStates
+
+ def isLose(self):
+ if VERBOSE:
+ print "isLose(%s) -> %s" % (self.state, self.state in self.problem.loseStates)
+ return self.state in self.problem.loseStates
+
+ def getNumAgents(self):
+ if VERBOSE:
+ print "getNumAgents(%s) -> %s" % (self.state, self.problem.numAgents)
+ return self.problem.numAgents
+
+
+class MultiagentTreeProblem(object):
+ def __init__(self, numAgents, startState, winStates, loseStates, successors, evaluation):
+ self.startState = MultiagentTreeState(self, startState)
+
+ self.numAgents = numAgents
+ self.winStates = winStates
+ self.loseStates = loseStates
+ self.evaluation = evaluation
+ self.successors = successors
+
+ self.reset()
+
+ self.stateToSuccessorMap = defaultdict(dict)
+ self.stateToActions = defaultdict(list)
+ for state, action, nextState in successors:
+ self.stateToActions[state].append(action)
+ self.stateToSuccessorMap[state][action] = nextState
+
+ def reset(self):
+ self.generatedStates = set([self.startState.state])
+
+
+def parseTreeProblem(testDict):
+ numAgents = int(testDict["num_agents"])
+ startState = testDict["start_state"]
+ winStates = set(testDict["win_states"].split(" "))
+ loseStates = set(testDict["lose_states"].split(" "))
+ successors = []
+
+ evaluation = {}
+ for line in testDict["evaluation"].split('\n'):
+ tokens = line.split()
+ if len(tokens) == 2:
+ state, value = tokens
+ evaluation[state] = float(value)
+ else:
+ raise Exception, "[parseTree] Bad evaluation line: |%s|" % (line,)
+
+ for line in testDict["successors"].split('\n'):
+ tokens = line.split()
+ if len(tokens) == 3:
+ state, action, nextState = tokens
+ successors.append((state, action, nextState))
+ else:
+ raise Exception, "[parseTree] Bad successor line: |%s|" % (line,)
+
+ return MultiagentTreeProblem(numAgents, startState, winStates, loseStates, successors, evaluation)
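+
+# Illustrative sketch (not part of the original module) of the dictionary
+# fields parseTreeProblem expects, mirroring the q2 .test files under
+# test_cases/:
+#
+#     testDict = {
+#         "num_agents": "2",
+#         "start_state": "a",
+#         "win_states": "b",
+#         "lose_states": "c",
+#         "successors": "a Left b\na Right c",
+#         "evaluation": "b 3.0\nc -2.0",
+#     }
+#     problem = parseTreeProblem(testDict)
+#     problem.startState.getLegalActions()   # -> ['Left', 'Right']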
+
+
+
+def run(lay, layName, pac, ghosts, disp, nGames=1, name='games'):
+ """
+ Runs a few games and outputs their statistics.
+ """
+ starttime = time.time()
+ print '*** Running %s on' % name, layName, '%d time(s).' % nGames
+ games = pacman.runGames(lay, pac, ghosts, disp, nGames, False, catchExceptions=True, timeout=120)
+ print '*** Finished running %s on' % name, layName, 'after %d seconds.' % (time.time() - starttime)
+ stats = {'time': time.time() - starttime, 'wins': [g.state.isWin() for g in games].count(True), 'games': games, 'scores': [g.state.getScore() for g in games],
+ 'timeouts': [g.agentTimeout for g in games].count(True), 'crashes': [g.agentCrashed for g in games].count(True)}
+ print '*** Won %d out of %d games. Average score: %f ***' % (stats['wins'], len(games), sum(stats['scores']) * 1.0 / len(games))
+ return stats
+
+class GradingAgent(Agent):
+ def __init__(self, seed, studentAgent, optimalActions, altDepthActions, partialPlyBugActions):
+        # save the student agent and the actions of the reference agents
+ self.studentAgent = studentAgent
+ self.optimalActions = optimalActions
+ self.altDepthActions = altDepthActions
+ self.partialPlyBugActions = partialPlyBugActions
+ # create fields for storing specific wrong actions
+ self.suboptimalMoves = []
+ self.wrongStatesExplored = -1
+        # boolean vectors track which reference implementations the student's actions remain consistent with
+ self.actionsConsistentWithOptimal = [True for i in range(len(optimalActions[0]))]
+ self.actionsConsistentWithAlternativeDepth = [True for i in range(len(altDepthActions[0]))]
+ self.actionsConsistentWithPartialPlyBug = [True for i in range(len(partialPlyBugActions[0]))]
+ # keep track of elapsed moves
+ self.stepCount = 0
+ self.seed = seed
+
+ def registerInitialState(self, state):
+ if 'registerInitialState' in dir(self.studentAgent):
+ self.studentAgent.registerInitialState(state)
+ random.seed(self.seed)
+
+ def getAction(self, state):
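+        # clear the shared explored-state set so the count captured below
+        # reflects only the states expanded by the student agent on this move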
+ GameState.getAndResetExplored()
+ studentAction = (self.studentAgent.getAction(state), len(GameState.getAndResetExplored()))
+ optimalActions = self.optimalActions[self.stepCount]
+ altDepthActions = self.altDepthActions[self.stepCount]
+ partialPlyBugActions = self.partialPlyBugActions[self.stepCount]
+ studentOptimalAction = False
+        curRightStatesExplored = False
+ for i in range(len(optimalActions)):
+ if studentAction[0] in optimalActions[i][0]:
+ studentOptimalAction = True
+ else:
+ self.actionsConsistentWithOptimal[i] = False
+ if studentAction[1] == int(optimalActions[i][1]):
+ curRightStatesExplored = True
+ if not curRightStatesExplored and self.wrongStatesExplored < 0:
+ self.wrongStatesExplored = 1
+ for i in range(len(altDepthActions)):
+ if studentAction[0] not in altDepthActions[i]:
+ self.actionsConsistentWithAlternativeDepth[i] = False
+ for i in range(len(partialPlyBugActions)):
+ if studentAction[0] not in partialPlyBugActions[i]:
+ self.actionsConsistentWithPartialPlyBug[i] = False
+ if not studentOptimalAction:
+ self.suboptimalMoves.append((state, studentAction[0], optimalActions[0][0][0]))
+ self.stepCount += 1
+ random.seed(self.seed + self.stepCount)
+ return optimalActions[0][0][0]
+
+ def getSuboptimalMoves(self):
+ return self.suboptimalMoves
+
+ def getWrongStatesExplored(self):
+ return self.wrongStatesExplored
+
+ def checkFailure(self):
+ """
+        Return -3 if the wrong number of states was explored.
+        Return 0 if the actions are consistent with the optimal solution.
+        Return -2 if they are only consistent with the partial-ply bug.
+        Return -1 if they are only consistent with a search depth off by one.
+        Otherwise, return +n, the number of suboptimal moves.
+ """
+ if self.wrongStatesExplored > 0:
+ return -3
+ if self.actionsConsistentWithOptimal.count(True) > 0:
+ return 0
+ elif self.actionsConsistentWithPartialPlyBug.count(True) > 0:
+ return -2
+ elif self.actionsConsistentWithAlternativeDepth.count(True) > 0:
+ return -1
+ else:
+ return len(self.suboptimalMoves)
+
+
+class PolyAgent(Agent):
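+    """
+    Runs the staff solution agents alongside deliberately perturbed variants
+    (alternative search depth, partial-ply bug), recording every agent's chosen
+    actions so they can later be written out as the solution traces.
+    """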
+ def __init__(self, seed, multiAgents, ourPacOptions, depth):
+ # prepare our pacman agents
+ solutionAgents, alternativeDepthAgents, partialPlyBugAgents = self.construct_our_pacs(multiAgents, ourPacOptions)
+ for p in solutionAgents:
+ p.depth = depth
+ for p in partialPlyBugAgents:
+ p.depth = depth
+ for p in alternativeDepthAgents[:2]:
+ p.depth = max(1, depth - 1)
+ for p in alternativeDepthAgents[2:]:
+ p.depth = depth + 1
+ self.solutionAgents = solutionAgents
+ self.alternativeDepthAgents = alternativeDepthAgents
+ self.partialPlyBugAgents = partialPlyBugAgents
+ # prepare fields for storing the results
+ self.optimalActionLists = []
+ self.alternativeDepthLists = []
+ self.partialPlyBugLists = []
+ self.seed = seed
+ self.stepCount = 0
+
+    def select(self, items, indices):
+        """
+        Return a sublist of elements given by indices in items.
+        """
+        return [items[i] for i in indices]
+
+ def construct_our_pacs(self, multiAgents, keyword_dict):
+ pacs_without_stop = [multiAgents.StaffMultiAgentSearchAgent(**keyword_dict) for i in range(3)]
+ keyword_dict['keepStop'] = 'True'
+ pacs_with_stop = [multiAgents.StaffMultiAgentSearchAgent(**keyword_dict) for i in range(3)]
+ keyword_dict['usePartialPlyBug'] = 'True'
+ partial_ply_bug_pacs = [multiAgents.StaffMultiAgentSearchAgent(**keyword_dict)]
+ keyword_dict['keepStop'] = 'False'
+ partial_ply_bug_pacs = partial_ply_bug_pacs + [multiAgents.StaffMultiAgentSearchAgent(**keyword_dict)]
+ for pac in pacs_with_stop + pacs_without_stop + partial_ply_bug_pacs:
+ pac.verbose = False
+ ourpac = [pacs_with_stop[0], pacs_without_stop[0]]
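+        # of the four agents selected below, the first two are assigned
+        # depth - 1 and the last two depth + 1 in __init__, producing the
+        # off-by-one-depth reference runs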
+ alternative_depth_pacs = self.select(pacs_with_stop + pacs_without_stop, [1, 4, 2, 5])
+ return (ourpac, alternative_depth_pacs, partial_ply_bug_pacs)
+
+ def registerInitialState(self, state):
+ for agent in self.solutionAgents + self.alternativeDepthAgents:
+ if 'registerInitialState' in dir(agent):
+ agent.registerInitialState(state)
+ random.seed(self.seed)
+
+ def getAction(self, state):
+ # survey agents
+ GameState.getAndResetExplored()
+ optimalActionLists = []
+ for agent in self.solutionAgents:
+ optimalActionLists.append((agent.getBestPacmanActions(state)[0], len(GameState.getAndResetExplored())))
+ alternativeDepthLists = [agent.getBestPacmanActions(state)[0] for agent in self.alternativeDepthAgents]
+ partialPlyBugLists = [agent.getBestPacmanActions(state)[0] for agent in self.partialPlyBugAgents]
+ # record responses
+ self.optimalActionLists.append(optimalActionLists)
+ self.alternativeDepthLists.append(alternativeDepthLists)
+ self.partialPlyBugLists.append(partialPlyBugLists)
+ self.stepCount += 1
+ random.seed(self.seed + self.stepCount)
+ return optimalActionLists[0][0][0]
+
+ def getTraces(self):
+ # return traces from individual agents
+ return (self.optimalActionLists, self.alternativeDepthLists, self.partialPlyBugLists)
+
+class PacmanGameTreeTest(testClasses.TestCase):
+
+ def __init__(self, question, testDict):
+ super(PacmanGameTreeTest, self).__init__(question, testDict)
+ self.seed = int(self.testDict['seed'])
+ self.alg = self.testDict['alg']
+ self.layout_text = self.testDict['layout']
+ self.layout_name = self.testDict['layoutName']
+ self.depth = int(self.testDict['depth'])
+ self.max_points = int(self.testDict['max_points'])
+
+ def execute(self, grades, moduleDict, solutionDict):
+ # load student code and staff code solutions
+ multiAgents = moduleDict['multiAgents']
+ studentAgent = getattr(multiAgents, self.alg)(depth=self.depth)
+ allActions = map(lambda x: json.loads(x), solutionDict['optimalActions'].split('\n'))
+ altDepthActions = map(lambda x: json.loads(x), solutionDict['altDepthActions'].split('\n'))
+ partialPlyBugActions = map(lambda x: json.loads(x), solutionDict['partialPlyBugActions'].split('\n'))
+ # set up game state and play a game
+ random.seed(self.seed)
+ lay = layout.Layout([l.strip() for l in self.layout_text.split('\n')])
+ pac = GradingAgent(self.seed, studentAgent, allActions, altDepthActions, partialPlyBugActions)
+ # check return codes and assign grades
+ disp = self.question.getDisplay()
+ stats = run(lay, self.layout_name, pac, [DirectionalGhost(i + 1) for i in range(2)], disp, name=self.alg)
+ if stats['timeouts'] > 0:
+            self.addMessage('Agent timed out on %s. No credit' % self.layout_name)
+ return self.testFail(grades)
+ if stats['crashes'] > 0:
+            self.addMessage('Agent crashed on %s. No credit' % self.layout_name)
+ return self.testFail(grades)
+ code = pac.checkFailure()
+ if code == 0:
+ return self.testPass(grades)
+ elif code == -3:
+ if pac.getWrongStatesExplored() >=0:
+ self.addMessage('Bug: Wrong number of states expanded.')
+ return self.testFail(grades)
+ else:
+ return self.testPass(grades)
+ elif code == -2:
+ self.addMessage('Bug: Partial Ply Bug')
+ return self.testFail(grades)
+ elif code == -1:
+ self.addMessage('Bug: Search depth off by 1')
+ return self.testFail(grades)
+ elif code > 0:
+ moves = pac.getSuboptimalMoves()
+ state, studentMove, optMove = random.choice(moves)
+ self.addMessage('Bug: Suboptimal moves')
+ self.addMessage('State:%s\nStudent Move:%s\nOptimal Move:%s' % (state, studentMove, optMove))
+ return self.testFail(grades)
+
+    def writeList(self, handle, name, lst):
+        handle.write('%s: """\n' % name)
+        for l in lst:
+            handle.write('%s\n' % json.dumps(l))
+        handle.write('"""\n')
+
+ def writeSolution(self, moduleDict, filePath):
+        # load module, set seed, create ghosts and pacman, run game
+ multiAgents = moduleDict['multiAgents']
+ random.seed(self.seed)
+ lay = layout.Layout([l.strip() for l in self.layout_text.split('\n')])
+ if self.alg == 'ExpectimaxAgent':
+ ourPacOptions = {'expectimax': 'True'}
+ elif self.alg == 'AlphaBetaAgent':
+ ourPacOptions = {'alphabeta': 'True'}
+ else:
+ ourPacOptions = {}
+ pac = PolyAgent(self.seed, multiAgents, ourPacOptions, self.depth)
+ disp = self.question.getDisplay()
+ run(lay, self.layout_name, pac, [DirectionalGhost(i + 1) for i in range(2)], disp, name=self.alg)
+ (optimalActions, altDepthActions, partialPlyBugActions) = pac.getTraces()
+ # recover traces and record to file
+ handle = open(filePath, 'w')
+ self.writeList(handle, 'optimalActions', optimalActions)
+ self.writeList(handle, 'altDepthActions', altDepthActions)
+ self.writeList(handle, 'partialPlyBugActions', partialPlyBugActions)
+ handle.close()
+
+
+
+class GraphGameTreeTest(testClasses.TestCase):
+
+ def __init__(self, question, testDict):
+ super(GraphGameTreeTest, self).__init__(question, testDict)
+ self.problem = parseTreeProblem(testDict)
+ self.alg = self.testDict['alg']
+ self.diagram = self.testDict['diagram'].split('\n')
+ self.depth = int(self.testDict['depth'])
+
+ def solveProblem(self, multiAgents):
+ self.problem.reset()
+ studentAgent = getattr(multiAgents, self.alg)(depth=self.depth)
+ action = studentAgent.getAction(self.problem.startState)
+ generated = self.problem.generatedStates
+ return action, " ".join([str(s) for s in sorted(generated)])
+
+ def addDiagram(self):
+ self.addMessage('Tree:')
+ for line in self.diagram:
+ self.addMessage(line)
+
+ def execute(self, grades, moduleDict, solutionDict):
+ multiAgents = moduleDict['multiAgents']
+ goldAction = solutionDict['action']
+ goldGenerated = solutionDict['generated']
+ action, generated = self.solveProblem(multiAgents)
+
+ fail = False
+ if action != goldAction:
+ self.addMessage('Incorrect move for depth=%s' % (self.depth,))
+ self.addMessage(' Student move: %s\n Optimal move: %s' % (action, goldAction))
+ fail = True
+
+ if generated != goldGenerated:
+ self.addMessage('Incorrect generated nodes for depth=%s' % (self.depth,))
+ self.addMessage(' Student generated nodes: %s\n Correct generated nodes: %s' % (generated, goldGenerated))
+ fail = True
+
+ if fail:
+ self.addDiagram()
+ return self.testFail(grades)
+ else:
+ return self.testPass(grades)
+
+ def writeSolution(self, moduleDict, filePath):
+ multiAgents = moduleDict['multiAgents']
+ action, generated = self.solveProblem(multiAgents)
+ with open(filePath, 'w') as handle:
+ handle.write('# This is the solution file for %s.\n' % self.path)
+ handle.write('action: "%s"\n' % (action,))
+ handle.write('generated: "%s"\n' % (generated,))
+ return True
+
+
+import time
+from util import TimeoutFunction
+
+
+class EvalAgentTest(testClasses.TestCase):
+
+ def __init__(self, question, testDict):
+ super(EvalAgentTest, self).__init__(question, testDict)
+ self.layoutName = testDict['layoutName']
+ self.agentName = testDict['agentName']
+ self.ghosts = eval(testDict['ghosts'])
+ self.maxTime = int(testDict['maxTime'])
+ self.seed = int(testDict['randomSeed'])
+ self.numGames = int(testDict['numGames'])
+
+ self.scoreMinimum = int(testDict['scoreMinimum']) if 'scoreMinimum' in testDict else None
+ self.nonTimeoutMinimum = int(testDict['nonTimeoutMinimum']) if 'nonTimeoutMinimum' in testDict else None
+ self.winsMinimum = int(testDict['winsMinimum']) if 'winsMinimum' in testDict else None
+
+ self.scoreThresholds = [int(s) for s in testDict.get('scoreThresholds','').split()]
+ self.nonTimeoutThresholds = [int(s) for s in testDict.get('nonTimeoutThresholds','').split()]
+ self.winsThresholds = [int(s) for s in testDict.get('winsThresholds','').split()]
+
+ self.maxPoints = sum([len(t) for t in [self.scoreThresholds, self.nonTimeoutThresholds, self.winsThresholds]])
+ self.agentArgs = testDict.get('agentArgs', '')
+
+
+ def execute(self, grades, moduleDict, solutionDict):
+ startTime = time.time()
+
+ agentType = getattr(moduleDict['multiAgents'], self.agentName)
+ agentOpts = pacman.parseAgentArgs(self.agentArgs) if self.agentArgs != '' else {}
+ agent = agentType(**agentOpts)
+
+ lay = layout.getLayout(self.layoutName, 3)
+
+ disp = self.question.getDisplay()
+
+ random.seed(self.seed)
+ games = pacman.runGames(lay, agent, self.ghosts, disp, self.numGames, False, catchExceptions=True, timeout=self.maxTime)
+ totalTime = time.time() - startTime
+
+ stats = {'time': totalTime, 'wins': [g.state.isWin() for g in games].count(True),
+ 'games': games, 'scores': [g.state.getScore() for g in games],
+ 'timeouts': [g.agentTimeout for g in games].count(True), 'crashes': [g.agentCrashed for g in games].count(True)}
+
+ averageScore = sum(stats['scores']) / float(len(stats['scores']))
+ nonTimeouts = self.numGames - stats['timeouts']
+ wins = stats['wins']
+
+ def gradeThreshold(value, minimum, thresholds, name):
+ points = 0
+ passed = (minimum == None) or (value >= minimum)
+ if passed:
+ for t in thresholds:
+ if value >= t:
+ points += 1
+ return (passed, points, value, minimum, thresholds, name)
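+        # e.g. an average score of 1200 with scoreMinimum=1000 and
+        # scoreThresholds=[500, 1000] passes and earns 2 of the 2 threshold points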
+
+ results = [gradeThreshold(averageScore, self.scoreMinimum, self.scoreThresholds, "average score"),
+ gradeThreshold(nonTimeouts, self.nonTimeoutMinimum, self.nonTimeoutThresholds, "games not timed out"),
+ gradeThreshold(wins, self.winsMinimum, self.winsThresholds, "wins")]
+
+ totalPoints = 0
+ for passed, points, value, minimum, thresholds, name in results:
+ if minimum == None and len(thresholds)==0:
+ continue
+
+ # print passed, points, value, minimum, thresholds, name
+ totalPoints += points
+ if not passed:
+ assert points == 0
+ self.addMessage("%s %s (fail: below minimum value %s)" % (value, name, minimum))
+ else:
+ self.addMessage("%s %s (%s of %s points)" % (value, name, points, len(thresholds)))
+
+ if minimum != None:
+ self.addMessage(" Grading scheme:")
+ self.addMessage(" < %s: fail" % (minimum,))
+ if len(thresholds)==0 or minimum != thresholds[0]:
+ self.addMessage(" >= %s: 0 points" % (minimum,))
+ for idx, threshold in enumerate(thresholds):
+ self.addMessage(" >= %s: %s points" % (threshold, idx+1))
+ elif len(thresholds) > 0:
+ self.addMessage(" Grading scheme:")
+ self.addMessage(" < %s: 0 points" % (thresholds[0],))
+ for idx, threshold in enumerate(thresholds):
+ self.addMessage(" >= %s: %s points" % (threshold, idx+1))
+
+ if any([not passed for passed, _, _, _, _, _ in results]):
+ totalPoints = 0
+
+ return self.testPartial(grades, totalPoints, self.maxPoints)
+
+ def writeSolution(self, moduleDict, filePath):
+ handle = open(filePath, 'w')
+ handle.write('# This is the solution file for %s.\n' % self.path)
+ handle.write('# File intentionally blank.\n')
+ handle.close()
+ return True
+
+
+
+
diff --git a/pacman.py b/pacman.py
new file mode 100644
index 0000000..740451d
--- /dev/null
+++ b/pacman.py
@@ -0,0 +1,684 @@
+# pacman.py
+# ---------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+"""
+Pacman.py holds the logic for the classic pacman game along with the main
+code to run a game. This file is divided into three sections:
+
+ (i) Your interface to the pacman world:
+ Pacman is a complex environment. You probably don't want to
+          read through all of the code we wrote to make the game run
+ correctly. This section contains the parts of the code
+ that you will need to understand in order to complete the
+ project. There is also some code in game.py that you should
+ understand.
+
+ (ii) The hidden secrets of pacman:
+ This section contains all of the logic code that the pacman
+ environment uses to decide who can move where, who dies when
+ things collide, etc. You shouldn't need to read this section
+ of code, but you can if you want.
+
+ (iii) Framework to start a game:
+          The final section contains the code for reading the command line
+          you use to set up the game, starting up a new game, and linking
+          in all the external parts (agent functions, graphics).
+ Check this section out to see all the options available to you.
+
+To play your first game, type 'python pacman.py' from the command line.
+The keys are 'a', 's', 'd', and 'w' to move (or arrow keys). Have fun!
+"""
+from game import GameStateData
+from game import Game
+from game import Directions
+from game import Actions
+from util import nearestPoint
+from util import manhattanDistance
+import util, layout
+import sys, types, time, random, os
+
+###################################################
+# YOUR INTERFACE TO THE PACMAN WORLD: A GameState #
+###################################################
+
+class GameState:
+ """
+ A GameState specifies the full game state, including the food, capsules,
+ agent configurations and score changes.
+
+ GameStates are used by the Game object to capture the actual state of the game and
+ can be used by agents to reason about the game.
+
+ Much of the information in a GameState is stored in a GameStateData object. We
+ strongly suggest that you access that data via the accessor methods below rather
+ than referring to the GameStateData object directly.
+
+ Note that in classic Pacman, Pacman is always agent 0.
+ """
+
+ ####################################################
+ # Accessor methods: use these to access state data #
+ ####################################################
+
+ # static variable keeps track of which states have had getLegalActions called
+ explored = set()
+ def getAndResetExplored():
+ tmp = GameState.explored.copy()
+ GameState.explored = set()
+ return tmp
+ getAndResetExplored = staticmethod(getAndResetExplored)
+
+ def getLegalActions( self, agentIndex=0 ):
+ """
+ Returns the legal actions for the agent specified.
+ """
+# GameState.explored.add(self)
+ if self.isWin() or self.isLose(): return []
+
+ if agentIndex == 0: # Pacman is moving
+ return PacmanRules.getLegalActions( self )
+ else:
+ return GhostRules.getLegalActions( self, agentIndex )
+
+ def generateSuccessor( self, agentIndex, action):
+ """
+ Returns the successor state after the specified agent takes the action.
+ """
+ # Check that successors exist
+ if self.isWin() or self.isLose(): raise Exception('Can\'t generate a successor of a terminal state.')
+
+ # Copy current state
+ state = GameState(self)
+
+ # Let agent's logic deal with its action's effects on the board
+ if agentIndex == 0: # Pacman is moving
+ state.data._eaten = [False for i in range(state.getNumAgents())]
+ PacmanRules.applyAction( state, action )
+ else: # A ghost is moving
+ GhostRules.applyAction( state, action, agentIndex )
+
+ # Time passes
+ if agentIndex == 0:
+ state.data.scoreChange += -TIME_PENALTY # Penalty for waiting around
+ else:
+ GhostRules.decrementTimer( state.data.agentStates[agentIndex] )
+
+ # Resolve multi-agent effects
+ GhostRules.checkDeath( state, agentIndex )
+
+ # Book keeping
+ state.data._agentMoved = agentIndex
+ state.data.score += state.data.scoreChange
+ GameState.explored.add(self)
+ GameState.explored.add(state)
+ return state
+
+ def getLegalPacmanActions( self ):
+ return self.getLegalActions( 0 )
+
+ def generatePacmanSuccessor( self, action ):
+ """
+ Generates the successor state after the specified pacman move
+ """
+ return self.generateSuccessor( 0, action )
+
+ def getPacmanState( self ):
+ """
+ Returns an AgentState object for pacman (in game.py)
+
+ state.pos gives the current position
+ state.direction gives the travel vector
+ """
+ return self.data.agentStates[0].copy()
+
+ def getPacmanPosition( self ):
+ return self.data.agentStates[0].getPosition()
+
+ def getGhostStates( self ):
+ return self.data.agentStates[1:]
+
+ def getGhostState( self, agentIndex ):
+ if agentIndex == 0 or agentIndex >= self.getNumAgents():
+ raise Exception("Invalid index passed to getGhostState")
+ return self.data.agentStates[agentIndex]
+
+ def getGhostPosition( self, agentIndex ):
+ if agentIndex == 0:
+ raise Exception("Pacman's index passed to getGhostPosition")
+ return self.data.agentStates[agentIndex].getPosition()
+
+ def getGhostPositions(self):
+ return [s.getPosition() for s in self.getGhostStates()]
+
+ def getNumAgents( self ):
+ return len( self.data.agentStates )
+
+ def getScore( self ):
+ return float(self.data.score)
+
+ def getCapsules(self):
+ """
+ Returns a list of positions (x,y) of the remaining capsules.
+ """
+ return self.data.capsules
+
+ def getNumFood( self ):
+ return self.data.food.count()
+
+ def getFood(self):
+ """
+ Returns a Grid of boolean food indicator variables.
+
+ Grids can be accessed via list notation, so to check
+ if there is food at (x,y), just call
+
+ currentFood = state.getFood()
+ if currentFood[x][y] == True: ...
+ """
+ return self.data.food
+
+ def getWalls(self):
+ """
+ Returns a Grid of boolean wall indicator variables.
+
+ Grids can be accessed via list notation, so to check
+ if there is a wall at (x,y), just call
+
+ walls = state.getWalls()
+ if walls[x][y] == True: ...
+ """
+ return self.data.layout.walls
+
+ def hasFood(self, x, y):
+ return self.data.food[x][y]
+
+ def hasWall(self, x, y):
+ return self.data.layout.walls[x][y]
+
+ def isLose( self ):
+ return self.data._lose
+
+ def isWin( self ):
+ return self.data._win
+
+ #############################################
+ # Helper methods: #
+ # You shouldn't need to call these directly #
+ #############################################
+
+ def __init__( self, prevState = None ):
+ """
+ Generates a new state by copying information from its predecessor.
+ """
+        if prevState != None: # Copy information from the predecessor state
+ self.data = GameStateData(prevState.data)
+ else:
+ self.data = GameStateData()
+
+ def deepCopy( self ):
+ state = GameState( self )
+ state.data = self.data.deepCopy()
+ return state
+
+ def __eq__( self, other ):
+ """
+ Allows two states to be compared.
+ """
+ return hasattr(other, 'data') and self.data == other.data
+
+ def __hash__( self ):
+ """
+ Allows states to be keys of dictionaries.
+ """
+ return hash( self.data )
+
+    def __str__( self ):
+        return str(self.data)
+
+ def initialize( self, layout, numGhostAgents=1000 ):
+ """
+ Creates an initial game state from a layout array (see layout.py).
+ """
+ self.data.initialize(layout, numGhostAgents)
+
+############################################################################
+# THE HIDDEN SECRETS OF PACMAN #
+# #
+# You shouldn't need to look through the code in this section of the file. #
+############################################################################
+
+SCARED_TIME = 40 # Moves ghosts are scared
+COLLISION_TOLERANCE = 0.7 # How close ghosts must be to Pacman to kill
+TIME_PENALTY = 1 # Number of points lost each round
+
+class ClassicGameRules:
+ """
+ These game rules manage the control flow of a game, deciding when
+ and how the game starts and ends.
+ """
+ def __init__(self, timeout=30):
+ self.timeout = timeout
+
+ def newGame( self, layout, pacmanAgent, ghostAgents, display, quiet = False, catchExceptions=False):
+ agents = [pacmanAgent] + ghostAgents[:layout.getNumGhosts()]
+ initState = GameState()
+ initState.initialize( layout, len(ghostAgents) )
+ game = Game(agents, display, self, catchExceptions=catchExceptions)
+ game.state = initState
+ self.initialState = initState.deepCopy()
+ self.quiet = quiet
+ return game
+
+ def process(self, state, game):
+ """
+ Checks to see whether it is time to end the game.
+ """
+ if state.isWin(): self.win(state, game)
+ if state.isLose(): self.lose(state, game)
+
+ def win( self, state, game ):
+ if not self.quiet: print "Pacman emerges victorious! Score: %d" % state.data.score
+ game.gameOver = True
+
+ def lose( self, state, game ):
+ if not self.quiet: print "Pacman died! Score: %d" % state.data.score
+ game.gameOver = True
+
+ def getProgress(self, game):
+ return float(game.state.getNumFood()) / self.initialState.getNumFood()
+
+ def agentCrash(self, game, agentIndex):
+ if agentIndex == 0:
+ print "Pacman crashed"
+ else:
+ print "A ghost crashed"
+
+ def getMaxTotalTime(self, agentIndex):
+ return self.timeout
+
+ def getMaxStartupTime(self, agentIndex):
+ return self.timeout
+
+ def getMoveWarningTime(self, agentIndex):
+ return self.timeout
+
+ def getMoveTimeout(self, agentIndex):
+ return self.timeout
+
+ def getMaxTimeWarnings(self, agentIndex):
+ return 0
+
+class PacmanRules:
+ """
+ These functions govern how pacman interacts with his environment under
+ the classic game rules.
+ """
+ PACMAN_SPEED=1
+
+ def getLegalActions( state ):
+ """
+ Returns a list of possible actions.
+ """
+ return Actions.getPossibleActions( state.getPacmanState().configuration, state.data.layout.walls )
+ getLegalActions = staticmethod( getLegalActions )
+
+ def applyAction( state, action ):
+ """
+ Edits the state to reflect the results of the action.
+ """
+ legal = PacmanRules.getLegalActions( state )
+ if action not in legal:
+ raise Exception("Illegal action " + str(action))
+
+ pacmanState = state.data.agentStates[0]
+
+ # Update Configuration
+ vector = Actions.directionToVector( action, PacmanRules.PACMAN_SPEED )
+ pacmanState.configuration = pacmanState.configuration.generateSuccessor( vector )
+
+ # Eat
+ next = pacmanState.configuration.getPosition()
+ nearest = nearestPoint( next )
+ if manhattanDistance( nearest, next ) <= 0.5 :
+ # Remove food
+ PacmanRules.consume( nearest, state )
+ applyAction = staticmethod( applyAction )
+
+ def consume( position, state ):
+ x,y = position
+ # Eat food
+ if state.data.food[x][y]:
+ state.data.scoreChange += 10
+ state.data.food = state.data.food.copy()
+ state.data.food[x][y] = False
+ state.data._foodEaten = position
+ # TODO: cache numFood?
+ numFood = state.getNumFood()
+ if numFood == 0 and not state.data._lose:
+ state.data.scoreChange += 500
+ state.data._win = True
+ # Eat capsule
+ if( position in state.getCapsules() ):
+ state.data.capsules.remove( position )
+ state.data._capsuleEaten = position
+ # Reset all ghosts' scared timers
+ for index in range( 1, len( state.data.agentStates ) ):
+ state.data.agentStates[index].scaredTimer = SCARED_TIME
+ consume = staticmethod( consume )
+
+class GhostRules:
+ """
+ These functions dictate how ghosts interact with their environment.
+ """
+ GHOST_SPEED=1.0
+ def getLegalActions( state, ghostIndex ):
+ """
+ Ghosts cannot stop, and cannot turn around unless they
+ reach a dead end, but can turn 90 degrees at intersections.
+ """
+ conf = state.getGhostState( ghostIndex ).configuration
+ possibleActions = Actions.getPossibleActions( conf, state.data.layout.walls )
+ reverse = Actions.reverseDirection( conf.direction )
+ if Directions.STOP in possibleActions:
+ possibleActions.remove( Directions.STOP )
+ if reverse in possibleActions and len( possibleActions ) > 1:
+ possibleActions.remove( reverse )
+ return possibleActions
+ getLegalActions = staticmethod( getLegalActions )
+
+ def applyAction( state, action, ghostIndex):
+
+ legal = GhostRules.getLegalActions( state, ghostIndex )
+ if action not in legal:
+ raise Exception("Illegal ghost action " + str(action))
+
+ ghostState = state.data.agentStates[ghostIndex]
+ speed = GhostRules.GHOST_SPEED
+ if ghostState.scaredTimer > 0: speed /= 2.0
+ vector = Actions.directionToVector( action, speed )
+ ghostState.configuration = ghostState.configuration.generateSuccessor( vector )
+ applyAction = staticmethod( applyAction )
+
+ def decrementTimer( ghostState):
+ timer = ghostState.scaredTimer
+ if timer == 1:
+ ghostState.configuration.pos = nearestPoint( ghostState.configuration.pos )
+ ghostState.scaredTimer = max( 0, timer - 1 )
+ decrementTimer = staticmethod( decrementTimer )
+
+ def checkDeath( state, agentIndex):
+ pacmanPosition = state.getPacmanPosition()
+ if agentIndex == 0: # Pacman just moved; Anyone can kill him
+ for index in range( 1, len( state.data.agentStates ) ):
+ ghostState = state.data.agentStates[index]
+ ghostPosition = ghostState.configuration.getPosition()
+ if GhostRules.canKill( pacmanPosition, ghostPosition ):
+ GhostRules.collide( state, ghostState, index )
+ else:
+ ghostState = state.data.agentStates[agentIndex]
+ ghostPosition = ghostState.configuration.getPosition()
+ if GhostRules.canKill( pacmanPosition, ghostPosition ):
+ GhostRules.collide( state, ghostState, agentIndex )
+ checkDeath = staticmethod( checkDeath )
+
+ def collide( state, ghostState, agentIndex):
+ if ghostState.scaredTimer > 0:
+ state.data.scoreChange += 200
+ GhostRules.placeGhost(state, ghostState)
+ ghostState.scaredTimer = 0
+ # Added for first-person
+ state.data._eaten[agentIndex] = True
+ else:
+ if not state.data._win:
+ state.data.scoreChange -= 500
+ state.data._lose = True
+ collide = staticmethod( collide )
+
+ def canKill( pacmanPosition, ghostPosition ):
+ return manhattanDistance( ghostPosition, pacmanPosition ) <= COLLISION_TOLERANCE
+ canKill = staticmethod( canKill )
+
+ def placeGhost(state, ghostState):
+ ghostState.configuration = ghostState.start
+ placeGhost = staticmethod( placeGhost )
+
+#############################
+# FRAMEWORK TO START A GAME #
+#############################
+
+def default(str):
+ return str + ' [Default: %default]'
+
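+# For illustration: parseAgentArgs("depth=2,alphabeta") returns
+# {'depth': '2', 'alphabeta': 1} -- comma-separated key=value pairs, with bare
+# keys defaulting to 1.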
+def parseAgentArgs(str):
+ if str == None: return {}
+ pieces = str.split(',')
+ opts = {}
+ for p in pieces:
+ if '=' in p:
+ key, val = p.split('=')
+ else:
+ key,val = p, 1
+ opts[key] = val
+ return opts
+
+def readCommand( argv ):
+ """
+ Processes the command used to run pacman from the command line.
+ """
+ from optparse import OptionParser
+ usageStr = """
+ USAGE: python pacman.py <options>
+ EXAMPLES: (1) python pacman.py
+ - starts an interactive game
+ (2) python pacman.py --layout smallClassic --zoom 2
+ OR python pacman.py -l smallClassic -z 2
+ - starts an interactive game on a smaller board, zoomed in
+ """
+ parser = OptionParser(usageStr)
+
+ parser.add_option('-n', '--numGames', dest='numGames', type='int',
+ help=default('the number of GAMES to play'), metavar='GAMES', default=1)
+ parser.add_option('-l', '--layout', dest='layout',
+ help=default('the LAYOUT_FILE from which to load the map layout'),
+ metavar='LAYOUT_FILE', default='mediumClassic')
+ parser.add_option('-p', '--pacman', dest='pacman',
+ help=default('the agent TYPE in the pacmanAgents module to use'),
+ metavar='TYPE', default='KeyboardAgent')
+ parser.add_option('-t', '--textGraphics', action='store_true', dest='textGraphics',
+ help='Display output as text only', default=False)
+ parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics',
+ help='Generate minimal output and no graphics', default=False)
+ parser.add_option('-g', '--ghosts', dest='ghost',
+ help=default('the ghost agent TYPE in the ghostAgents module to use'),
+ metavar = 'TYPE', default='RandomGhost')
+ parser.add_option('-k', '--numghosts', type='int', dest='numGhosts',
+ help=default('The maximum number of ghosts to use'), default=4)
+ parser.add_option('-z', '--zoom', type='float', dest='zoom',
+ help=default('Zoom the size of the graphics window'), default=1.0)
+ parser.add_option('-f', '--fixRandomSeed', action='store_true', dest='fixRandomSeed',
+ help='Fixes the random seed to always play the same game', default=False)
+ parser.add_option('-r', '--recordActions', action='store_true', dest='record',
+ help='Writes game histories to a file (named by the time they were played)', default=False)
+ parser.add_option('--replay', dest='gameToReplay',
+ help='A recorded game file (pickle) to replay', default=None)
+ parser.add_option('-a','--agentArgs',dest='agentArgs',
+ help='Comma separated values sent to agent. e.g. "opt1=val1,opt2,opt3=val3"')
+ parser.add_option('-x', '--numTraining', dest='numTraining', type='int',
+ help=default('How many episodes are training (suppresses output)'), default=0)
+ parser.add_option('--frameTime', dest='frameTime', type='float',
+ help=default('Time to delay between frames; <0 means keyboard'), default=0.1)
+ parser.add_option('-c', '--catchExceptions', action='store_true', dest='catchExceptions',
+ help='Turns on exception handling and timeouts during games', default=False)
+ parser.add_option('--timeout', dest='timeout', type='int',
+ help=default('Maximum length of time an agent can spend computing in a single game'), default=30)
+
+ options, otherjunk = parser.parse_args(argv)
+ if len(otherjunk) != 0:
+ raise Exception('Command line input not understood: ' + str(otherjunk))
+ args = dict()
+
+ # Fix the random seed
+ if options.fixRandomSeed: random.seed('cs188')
+
+ # Choose a layout
+ args['layout'] = layout.getLayout( options.layout )
+ if args['layout'] == None: raise Exception("The layout " + options.layout + " cannot be found")
+
+ # Choose a Pacman agent
+ noKeyboard = options.gameToReplay == None and (options.textGraphics or options.quietGraphics)
+ pacmanType = loadAgent(options.pacman, noKeyboard)
+ agentOpts = parseAgentArgs(options.agentArgs)
+ if options.numTraining > 0:
+ args['numTraining'] = options.numTraining
+ if 'numTraining' not in agentOpts: agentOpts['numTraining'] = options.numTraining
+ pacman = pacmanType(**agentOpts) # Instantiate Pacman with agentArgs
+ args['pacman'] = pacman
+
+ # Don't display training games
+ if 'numTrain' in agentOpts:
+ options.numQuiet = int(agentOpts['numTrain'])
+ options.numIgnore = int(agentOpts['numTrain'])
+
+ # Choose a ghost agent
+ ghostType = loadAgent(options.ghost, noKeyboard)
+ args['ghosts'] = [ghostType( i+1 ) for i in range( options.numGhosts )]
+
+ # Choose a display format
+ if options.quietGraphics:
+ import textDisplay
+ args['display'] = textDisplay.NullGraphics()
+ elif options.textGraphics:
+ import textDisplay
+ textDisplay.SLEEP_TIME = options.frameTime
+ args['display'] = textDisplay.PacmanGraphics()
+ else:
+ import graphicsDisplay
+ args['display'] = graphicsDisplay.PacmanGraphics(options.zoom, frameTime = options.frameTime)
+ args['numGames'] = options.numGames
+ args['record'] = options.record
+ args['catchExceptions'] = options.catchExceptions
+ args['timeout'] = options.timeout
+
+ # Special case: recorded games don't use the runGames method or args structure
+ if options.gameToReplay != None:
+ print 'Replaying recorded game %s.' % options.gameToReplay
+ import cPickle
+ f = open(options.gameToReplay)
+ try: recorded = cPickle.load(f)
+ finally: f.close()
+ recorded['display'] = args['display']
+ replayGame(**recorded)
+ sys.exit(0)
+
+ return args
+
+def loadAgent(pacman, nographics):
+    # Looks through all PYTHONPATH directories for the right module.
+ pythonPathStr = os.path.expandvars("$PYTHONPATH")
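+    # PYTHONPATH entries are separated by ';' on Windows and ':' elsewhere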
+ if pythonPathStr.find(';') == -1:
+ pythonPathDirs = pythonPathStr.split(':')
+ else:
+ pythonPathDirs = pythonPathStr.split(';')
+ pythonPathDirs.append('.')
+
+ for moduleDir in pythonPathDirs:
+ if not os.path.isdir(moduleDir): continue
+ moduleNames = [f for f in os.listdir(moduleDir) if f.endswith('gents.py')]
+ for modulename in moduleNames:
+ try:
+ module = __import__(modulename[:-3])
+ except ImportError:
+ continue
+ if pacman in dir(module):
+ if nographics and modulename == 'keyboardAgents.py':
+ raise Exception('Using the keyboard requires graphics (not text display)')
+ return getattr(module, pacman)
+ raise Exception('The agent ' + pacman + ' is not specified in any *Agents.py.')
+
+def replayGame( layout, actions, display ):
+ import pacmanAgents, ghostAgents
+ rules = ClassicGameRules()
+ agents = [pacmanAgents.GreedyAgent()] + [ghostAgents.RandomGhost(i+1) for i in range(layout.getNumGhosts())]
+ game = rules.newGame( layout, agents[0], agents[1:], display )
+ state = game.state
+ display.initialize(state.data)
+
+ for action in actions:
+ # Execute the action
+ state = state.generateSuccessor( *action )
+ # Change the display
+ display.update( state.data )
+ # Allow for game specific conditions (winning, losing, etc.)
+ rules.process(state, game)
+
+ display.finish()
+
+def runGames( layout, pacman, ghosts, display, numGames, record, numTraining = 0, catchExceptions=False, timeout=30 ):
+ import __main__
+ __main__.__dict__['_display'] = display
+
+ rules = ClassicGameRules(timeout)
+ games = []
+
+ for i in range( numGames ):
+ beQuiet = i < numTraining
+ if beQuiet:
+ # Suppress output and graphics
+ import textDisplay
+ gameDisplay = textDisplay.NullGraphics()
+ rules.quiet = True
+ else:
+ gameDisplay = display
+ rules.quiet = False
+ game = rules.newGame( layout, pacman, ghosts, gameDisplay, beQuiet, catchExceptions)
+ game.run()
+ if not beQuiet: games.append(game)
+
+ if record:
+ import time, cPickle
+ fname = ('recorded-game-%d' % (i + 1)) + '-'.join([str(t) for t in time.localtime()[1:6]])
+ f = file(fname, 'w')
+ components = {'layout': layout, 'actions': game.moveHistory}
+ cPickle.dump(components, f)
+ f.close()
+
+ if (numGames-numTraining) > 0:
+ scores = [game.state.getScore() for game in games]
+ wins = [game.state.isWin() for game in games]
+ winRate = wins.count(True)/ float(len(wins))
+ print 'Average Score:', sum(scores) / float(len(scores))
+ print 'Scores: ', ', '.join([str(score) for score in scores])
+ print 'Win Rate: %d/%d (%.2f)' % (wins.count(True), len(wins), winRate)
+ print 'Record: ', ', '.join([ ['Loss', 'Win'][int(w)] for w in wins])
+
+ return games
+
+if __name__ == '__main__':
+ """
+ The main function called when pacman.py is run
+ from the command line:
+
+ > python pacman.py
+
+ See the usage string for more details.
+
+ > python pacman.py --help
+ """
+ args = readCommand( sys.argv[1:] ) # Get game components based on input
+ runGames( **args )
+
+ # import cProfile
+ # cProfile.run("runGames( **args )")
+ pass
diff --git a/pacmanAgents.py b/pacmanAgents.py
new file mode 100644
index 0000000..ae97634
--- /dev/null
+++ b/pacmanAgents.py
@@ -0,0 +1,52 @@
+# pacmanAgents.py
+# ---------------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+from pacman import Directions
+from game import Agent
+import random
+import game
+import util
+
+class LeftTurnAgent(game.Agent):
+ "An agent that turns left at every opportunity"
+
+ def getAction(self, state):
+ legal = state.getLegalPacmanActions()
+ current = state.getPacmanState().configuration.direction
+ if current == Directions.STOP: current = Directions.NORTH
+ left = Directions.LEFT[current]
+ if left in legal: return left
+ if current in legal: return current
+ if Directions.RIGHT[current] in legal: return Directions.RIGHT[current]
+ if Directions.LEFT[left] in legal: return Directions.LEFT[left]
+ return Directions.STOP
+
+class GreedyAgent(Agent):
+ def __init__(self, evalFn="scoreEvaluation"):
+ self.evaluationFunction = util.lookup(evalFn, globals())
+ assert self.evaluationFunction != None
+
+ def getAction(self, state):
+ # Generate candidate actions
+ legal = state.getLegalPacmanActions()
+ if Directions.STOP in legal: legal.remove(Directions.STOP)
+
+ successors = [(state.generateSuccessor(0, action), action) for action in legal]
+        scored = [(self.evaluationFunction(nextState), action) for nextState, action in successors]
+ bestScore = max(scored)[0]
+ bestActions = [pair[1] for pair in scored if pair[0] == bestScore]
+ return random.choice(bestActions)
+
+def scoreEvaluation(state):
+ return state.getScore()
diff --git a/projectParams.py b/projectParams.py
new file mode 100644
index 0000000..3502a3d
--- /dev/null
+++ b/projectParams.py
@@ -0,0 +1,18 @@
+# projectParams.py
+# ----------------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+STUDENT_CODE_DEFAULT = 'multiAgents.py'
+PROJECT_TEST_CLASSES = 'multiagentTestClasses.py'
+PROJECT_NAME = 'Project 2: Multiagent search'
+BONUS_PIC = False
diff --git a/testClasses.py b/testClasses.py
new file mode 100644
index 0000000..67b76b5
--- /dev/null
+++ b/testClasses.py
@@ -0,0 +1,189 @@
+# testClasses.py
+# --------------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+# import modules from python standard library
+import inspect
+import re
+import sys
+
+
+# Class which models a question in a project. Note that questions have a
+# maximum number of points they are worth, and are composed of a series of
+# test cases
+class Question(object):
+
+ def raiseNotDefined(self):
+ print 'Method not implemented: %s' % inspect.stack()[1][3]
+ sys.exit(1)
+
+ def __init__(self, questionDict, display):
+ self.maxPoints = int(questionDict['max_points'])
+ self.testCases = []
+ self.display = display
+
+ def getDisplay(self):
+ return self.display
+
+ def getMaxPoints(self):
+ return self.maxPoints
+
+ # Note that 'thunk' must be a function which accepts a single argument,
+ # namely a 'grading' object
+ def addTestCase(self, testCase, thunk):
+ self.testCases.append((testCase, thunk))
+
+ def execute(self, grades):
+ self.raiseNotDefined()
+
+# Question in which all test cases must be passed in order to receive credit
+class PassAllTestsQuestion(Question):
+
+ def execute(self, grades):
+ # TODO: is this the right way to use grades? The autograder doesn't seem to use it.
+ testsFailed = False
+ grades.assignZeroCredit()
+ for _, f in self.testCases:
+ if not f(grades):
+ testsFailed = True
+ if testsFailed:
+ grades.fail("Tests failed.")
+ else:
+ grades.assignFullCredit()
+
+
+# Question in which partial credit is given for test cases with a ``points'' property.
+# All other tests are mandatory and must be passed.
+class HackedPartialCreditQuestion(Question):
+
+ def execute(self, grades):
+ # TODO: is this the right way to use grades? The autograder doesn't seem to use it.
+ grades.assignZeroCredit()
+
+ points = 0
+ passed = True
+ for testCase, f in self.testCases:
+ testResult = f(grades)
+ if "points" in testCase.testDict:
+ if testResult: points += float(testCase.testDict["points"])
+ else:
+ passed = passed and testResult
+
+ ## FIXME: Below terrible hack to match q3's logic
+ if int(points) == self.maxPoints and not passed:
+ grades.assignZeroCredit()
+ else:
+ grades.addPoints(int(points))
+
+
+class Q6PartialCreditQuestion(Question):
+    """Fails any test which returns False, otherwise doesn't affect the grades object.
+ Partial credit tests will add the required points."""
+
+ def execute(self, grades):
+ grades.assignZeroCredit()
+
+ results = []
+ for _, f in self.testCases:
+ results.append(f(grades))
+ if False in results:
+ grades.assignZeroCredit()
+
+class PartialCreditQuestion(Question):
+    """Fails any test which returns False, otherwise doesn't affect the grades object.
+ Partial credit tests will add the required points."""
+
+ def execute(self, grades):
+ grades.assignZeroCredit()
+
+ for _, f in self.testCases:
+ if not f(grades):
+ grades.assignZeroCredit()
+ grades.fail("Tests failed.")
+ return False
+
+
+
+class NumberPassedQuestion(Question):
+ """Grade is the number of test cases passed."""
+
+ def execute(self, grades):
+ grades.addPoints([f(grades) for _, f in self.testCases].count(True))
+
+
+
+
+
+# Template modeling a generic test case
+class TestCase(object):
+
+ def raiseNotDefined(self):
+ print 'Method not implemented: %s' % inspect.stack()[1][3]
+ sys.exit(1)
+
+ def getPath(self):
+ return self.path
+
+ def __init__(self, question, testDict):
+ self.question = question
+ self.testDict = testDict
+ self.path = testDict['path']
+ self.messages = []
+
+ def __str__(self):
+ self.raiseNotDefined()
+
+ def execute(self, grades, moduleDict, solutionDict):
+ self.raiseNotDefined()
+
+ def writeSolution(self, moduleDict, filePath):
+ self.raiseNotDefined()
+ return True
+
+ # Tests should call the following messages for grading
+ # to ensure a uniform format for test output.
+ #
+ # TODO: this is hairy, but we need to fix grading.py's interface
+ # to get a nice hierarchical project - question - test structure,
+ # then these should be moved into Question proper.
+ def testPass(self, grades):
+ grades.addMessage('PASS: %s' % (self.path,))
+ for line in self.messages:
+ grades.addMessage(' %s' % (line,))
+ return True
+
+ def testFail(self, grades):
+ grades.addMessage('FAIL: %s' % (self.path,))
+ for line in self.messages:
+ grades.addMessage(' %s' % (line,))
+ return False
+
+ # This should really be question level?
+ #
+ def testPartial(self, grades, points, maxPoints):
+ grades.addPoints(points)
+ extraCredit = max(0, points - maxPoints)
+ regularCredit = points - extraCredit
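+        # e.g. points=5 with maxPoints=4 gives regularCredit=4 and extraCredit=1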
+
+ grades.addMessage('%s: %s (%s of %s points)' % ("PASS" if points >= maxPoints else "FAIL", self.path, regularCredit, maxPoints))
+ if extraCredit > 0:
+ grades.addMessage('EXTRA CREDIT: %s points' % (extraCredit,))
+
+ for line in self.messages:
+ grades.addMessage(' %s' % (line,))
+
+ return True
+
+ def addMessage(self, message):
+ self.messages.extend(message.split('\n'))
+
diff --git a/testParser.py b/testParser.py
new file mode 100644
index 0000000..ceedeaf
--- /dev/null
+++ b/testParser.py
@@ -0,0 +1,85 @@
+# testParser.py
+# -------------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+import re
+import sys
+
+class TestParser(object):
+
+ def __init__(self, path):
+ # save the path to the test file
+ self.path = path
+
+ def removeComments(self, rawlines):
+ # remove any portion of a line following a '#' symbol
+ fixed_lines = []
+ for l in rawlines:
+ idx = l.find('#')
+ if idx == -1:
+ fixed_lines.append(l)
+ else:
+ fixed_lines.append(l[0:idx])
+ return '\n'.join(fixed_lines)
+
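+    # Test files contain one-line properties such as:   alg: "MinimaxAgent"
+    # and multiline properties delimited by triple quotes, for example:
+    #     diagram: """
+    #     ...
+    #     """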
+ def parse(self):
+ # read in the test case and remove comments
+ test = {}
+ with open(self.path) as handle:
+ raw_lines = handle.read().split('\n')
+
+ test_text = self.removeComments(raw_lines)
+ test['__raw_lines__'] = raw_lines
+ test['path'] = self.path
+ test['__emit__'] = []
+ lines = test_text.split('\n')
+ i = 0
+ # read a property in each loop cycle
+ while(i < len(lines)):
+ # skip blank lines
+ if re.match('\A\s*\Z', lines[i]):
+ test['__emit__'].append(("raw", raw_lines[i]))
+ i += 1
+ continue
+ m = re.match('\A([^"]*?):\s*"([^"]*)"\s*\Z', lines[i])
+ if m:
+ test[m.group(1)] = m.group(2)
+ test['__emit__'].append(("oneline", m.group(1)))
+ i += 1
+ continue
+ m = re.match('\A([^"]*?):\s*"""\s*\Z', lines[i])
+ if m:
+ msg = []
+ i += 1
+ while(not re.match('\A\s*"""\s*\Z', lines[i])):
+ msg.append(raw_lines[i])
+ i += 1
+ test[m.group(1)] = '\n'.join(msg)
+ test['__emit__'].append(("multiline", m.group(1)))
+ i += 1
+ continue
+ print 'error parsing test file: %s' % self.path
+ sys.exit(1)
+ return test
+
+
+def emitTestDict(testDict, handle):
+ for kind, data in testDict['__emit__']:
+ if kind == "raw":
+ handle.write(data + "\n")
+ elif kind == "oneline":
+ handle.write('%s: "%s"\n' % (data, testDict[data]))
+ elif kind == "multiline":
+ handle.write('%s: """\n%s\n"""\n' % (data, testDict[data]))
+ else:
+ raise Exception("Bad __emit__")
diff --git a/test_cases/CONFIG b/test_cases/CONFIG
new file mode 100644
index 0000000..e01f27b
--- /dev/null
+++ b/test_cases/CONFIG
@@ -0,0 +1 @@
+order: "q1 q2 q3 q4 q5"
diff --git a/test_cases/extra/CONFIG b/test_cases/extra/CONFIG
new file mode 100644
index 0000000..3a646dd
--- /dev/null
+++ b/test_cases/extra/CONFIG
@@ -0,0 +1,2 @@
+max_points: "0"
+class: "PartialCreditQuestion"
diff --git a/test_cases/extra/grade-agent.test b/test_cases/extra/grade-agent.test
new file mode 100644
index 0000000..5da84c6
--- /dev/null
+++ b/test_cases/extra/grade-agent.test
@@ -0,0 +1,11 @@
+class: "EvalAgentTest"
+
+agentName: "ContestAgent"
+layoutName: "contestClassic"
+maxTime: "180"
+numGames: "5"
+
+scoreThresholds: "2500 2900"
+
+randomSeed: "0"
+ghosts: "[DirectionalGhost(1), DirectionalGhost(2), DirectionalGhost(3)]"
diff --git a/test_cases/q1/CONFIG b/test_cases/q1/CONFIG
new file mode 100644
index 0000000..426bfe9
--- /dev/null
+++ b/test_cases/q1/CONFIG
@@ -0,0 +1,2 @@
+max_points: "4"
+class: "PartialCreditQuestion"
diff --git a/test_cases/q1/grade-agent.solution b/test_cases/q1/grade-agent.solution
new file mode 100644
index 0000000..278af4f
--- /dev/null
+++ b/test_cases/q1/grade-agent.solution
@@ -0,0 +1,2 @@
+# This is the solution file for test_cases/q1/grade-agent.test.
+# File intentionally blank.
diff --git a/test_cases/q1/grade-agent.test b/test_cases/q1/grade-agent.test
new file mode 100644
index 0000000..3a70660
--- /dev/null
+++ b/test_cases/q1/grade-agent.test
@@ -0,0 +1,18 @@
+class: "EvalAgentTest"
+
+agentName: "ReflexAgent"
+layoutName: "openClassic"
+maxTime: "120"
+numGames: "10"
+
+
+nonTimeoutMinimum: "10"
+
+scoreThresholds: "500 1000"
+
+winsMinimum: "1"
+winsThresholds: "5 10"
+
+
+randomSeed: "0"
+ghosts: "[RandomGhost(1)]"
diff --git a/test_cases/q2/0-lecture-6-tree.solution b/test_cases/q2/0-lecture-6-tree.solution
new file mode 100644
index 0000000..3c6a74d
--- /dev/null
+++ b/test_cases/q2/0-lecture-6-tree.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/0-lecture-6-tree.test.
+action: "Center"
+generated: "A B C D E F G H I max min1 min2 min3"
diff --git a/test_cases/q2/0-lecture-6-tree.test b/test_cases/q2/0-lecture-6-tree.test
new file mode 100644
index 0000000..debf815
--- /dev/null
+++ b/test_cases/q2/0-lecture-6-tree.test
@@ -0,0 +1,50 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "2"
+
+# Tree from lecture 6 slides
+diagram: """
+ max
+ /-/ | \--\
+ / | \
+ / | \
+ min1 min2 min3
+ /|\ /|\ /|\
+ / | \ / | \ / | \
+A B C D E F G H I
+3 12 8 5 4 6 14 1 11
+"""
+
+num_agents: "2"
+
+start_state: "max"
+win_states: "A B C D E F G H I"
+lose_states: ""
+
+successors: """
+max Left min1
+max Center min2
+max Right min3
+min1 Left A
+min1 Center B
+min1 Right C
+min2 Left D
+min2 Center E
+min2 Right F
+min3 Left G
+min3 Center H
+min3 Right I
+"""
+
+
+evaluation: """
+A 3.0
+B 12.0
+C 8.0
+D 5.0
+E 4.0
+F 6.0
+G 14.0
+H 1.0
+I 11.0
+"""
diff --git a/test_cases/q2/0-small-tree.solution b/test_cases/q2/0-small-tree.solution
new file mode 100644
index 0000000..f381f9a
--- /dev/null
+++ b/test_cases/q2/0-small-tree.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/0-small-tree.test.
+action: "pacLeft"
+generated: "A B C D deeper minLeft minRight root"
diff --git a/test_cases/q2/0-small-tree.test b/test_cases/q2/0-small-tree.test
new file mode 100644
index 0000000..5eaa65c
--- /dev/null
+++ b/test_cases/q2/0-small-tree.test
@@ -0,0 +1,36 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "3"
+
+diagram: """
+ root
+ / \
+ minLeft minRight
+ / \ / \
+ A B C deeper
+ 4 3 2 |
+ D
+ 1000
+"""
+num_agents: "2"
+
+start_state: "root"
+win_states: "A C"
+lose_states: "B D"
+
+successors: """
+root pacLeft minLeft
+root pacRight minRight
+minLeft gLeft A
+minLeft gRight B
+minRight gLeft C
+minRight gRight deeper
+deeper pacLeft D
+"""
+
+evaluation: """
+A 4.0
+B 3.0
+C 2.0
+D 1000.0
+"""
diff --git a/test_cases/q2/1-1-minmax.solution b/test_cases/q2/1-1-minmax.solution
new file mode 100644
index 0000000..3ac7510
--- /dev/null
+++ b/test_cases/q2/1-1-minmax.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/1-1-minmax.test.
+action: "Left"
+generated: "a b1 b2 c1 c2 cx d1 d2 d3 d4 dx"
diff --git a/test_cases/q2/1-1-minmax.test b/test_cases/q2/1-1-minmax.test
new file mode 100644
index 0000000..addd65b
--- /dev/null
+++ b/test_cases/q2/1-1-minmax.test
@@ -0,0 +1,47 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "3"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ |
+ c1 c2 cx
+ / \ / \ |
+ d1 d2 d3 d4 dx
+-3 -9 10 6 -3.01
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b1 is -3.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Down cx
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+cx Down dx
+"""
+
+evaluation: """
+d1 -3.0
+d2 -9.0
+d3 10.0
+d4 6.0
+dx -3.01
+"""
diff --git a/test_cases/q2/1-2-minmax.solution b/test_cases/q2/1-2-minmax.solution
new file mode 100644
index 0000000..e40e3de
--- /dev/null
+++ b/test_cases/q2/1-2-minmax.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/1-2-minmax.test.
+action: "Right"
+generated: "a b1 b2 c1 c2 cx d1 d2 d3 d4 dx"
diff --git a/test_cases/q2/1-2-minmax.test b/test_cases/q2/1-2-minmax.test
new file mode 100644
index 0000000..44e0a77
--- /dev/null
+++ b/test_cases/q2/1-2-minmax.test
@@ -0,0 +1,47 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "3"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ |
+ c1 c2 cx
+ / \ / \ |
+ d1 d2 d3 d4 dx
+-3 -9 10 6 -2.99
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b1 is -3.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Down cx
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+cx Down dx
+"""
+
+evaluation: """
+d1 -3.0
+d2 -9.0
+d3 10.0
+d4 6.0
+dx -2.99
+"""
diff --git a/test_cases/q2/1-3-minmax.solution b/test_cases/q2/1-3-minmax.solution
new file mode 100644
index 0000000..513961c
--- /dev/null
+++ b/test_cases/q2/1-3-minmax.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/1-3-minmax.test.
+action: "Left"
+generated: "a b1 b2 c3 c4 cx d5 d6 d7 d8 dx"
diff --git a/test_cases/q2/1-3-minmax.test b/test_cases/q2/1-3-minmax.test
new file mode 100644
index 0000000..5f4b12d
--- /dev/null
+++ b/test_cases/q2/1-3-minmax.test
@@ -0,0 +1,47 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "3"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ | / \
+ cx c3 c4
+ | / \ / \
+ dx d5 d6 d7 d8
+ 4.01 4 -7 0 5
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b2 is 4.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Down cx
+b2 Left c3
+b2 Right c4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+"""
+
+evaluation: """
+d5 4.0
+d6 -7.0
+d7 0.0
+d8 5.0
+dx 4.01
+"""
diff --git a/test_cases/q2/1-4-minmax.solution b/test_cases/q2/1-4-minmax.solution
new file mode 100644
index 0000000..1ae4c0f
--- /dev/null
+++ b/test_cases/q2/1-4-minmax.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/1-4-minmax.test.
+action: "Right"
+generated: "a b1 b2 c3 c4 cx d5 d6 d7 d8 dx"
diff --git a/test_cases/q2/1-4-minmax.test b/test_cases/q2/1-4-minmax.test
new file mode 100644
index 0000000..445fe0f
--- /dev/null
+++ b/test_cases/q2/1-4-minmax.test
@@ -0,0 +1,47 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "3"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ | / \
+ cx c3 c4
+ | / \ / \
+ dx d5 d6 d7 d8
+ 3.99 4 -7 0 5
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b2 is 4.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Down cx
+b2 Left c3
+b2 Right c4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+"""
+
+evaluation: """
+d5 4.0
+d6 -7.0
+d7 0.0
+d8 5.0
+dx 3.99
+"""
diff --git a/test_cases/q2/1-5-minmax.solution b/test_cases/q2/1-5-minmax.solution
new file mode 100644
index 0000000..0553ca7
--- /dev/null
+++ b/test_cases/q2/1-5-minmax.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/1-5-minmax.test.
+action: "Right"
+generated: "A B C D E F G H Z a b1 b2 c1 c2 cx d1 d2 d3 d4 dx"
diff --git a/test_cases/q2/1-5-minmax.test b/test_cases/q2/1-5-minmax.test
new file mode 100644
index 0000000..5ce2ba9
--- /dev/null
+++ b/test_cases/q2/1-5-minmax.test
@@ -0,0 +1,75 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "4"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ |
+ c1 c2 cx
+ / \ / \ |
+ d1 d2 d3 d4 dx
+ / \ / \ / \ / \ |
+ A B C D E F G H Z
+-3 13 5 9 10 3 -6 8 3.01
+
+a - max
+b - min
+c - max
+d - min
+
+Note the minimax value of b1 is 3.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "A B C D E F G H I J K L M N O P Z"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Down cx
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+d1 Left A
+d1 Right B
+d2 Left C
+d2 Right D
+d3 Left E
+d3 Right F
+d4 Left G
+d4 Right H
+d5 Left I
+d5 Right J
+d6 Left K
+d6 Right L
+d7 Left M
+d7 Right N
+d8 Left O
+d8 Right P
+dx Down Z
+"""
+
+evaluation: """
+A -3.0
+B 13.0
+C 5.0
+D 9.0
+E 10.0
+F 3.0
+G -6.0
+H 8.0
+Z 3.01
+"""
diff --git a/test_cases/q2/1-6-minmax.solution b/test_cases/q2/1-6-minmax.solution
new file mode 100644
index 0000000..f25e068
--- /dev/null
+++ b/test_cases/q2/1-6-minmax.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/1-6-minmax.test.
+action: "Left"
+generated: "A B C D E F G H Z a b1 b2 c1 c2 cx d1 d2 d3 d4 dx"
diff --git a/test_cases/q2/1-6-minmax.test b/test_cases/q2/1-6-minmax.test
new file mode 100644
index 0000000..44b166f
--- /dev/null
+++ b/test_cases/q2/1-6-minmax.test
@@ -0,0 +1,75 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "4"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ |
+ c1 c2 cx
+ / \ / \ |
+ d1 d2 d3 d4 dx
+ / \ / \ / \ / \ |
+ A B C D E F G H Z
+-3 13 5 9 10 3 -6 8 2.99
+
+a - max
+b - min
+c - max
+d - min
+
+Note the minimax value of b1 is 3.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "A B C D E F G H I J K L M N O P Z"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Down cx
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+d1 Left A
+d1 Right B
+d2 Left C
+d2 Right D
+d3 Left E
+d3 Right F
+d4 Left G
+d4 Right H
+d5 Left I
+d5 Right J
+d6 Left K
+d6 Right L
+d7 Left M
+d7 Right N
+d8 Left O
+d8 Right P
+dx Down Z
+"""
+
+evaluation: """
+A -3.0
+B 13.0
+C 5.0
+D 9.0
+E 10.0
+F 3.0
+G -6.0
+H 8.0
+Z 2.99
+"""
diff --git a/test_cases/q2/1-7-minmax.solution b/test_cases/q2/1-7-minmax.solution
new file mode 100644
index 0000000..fd801a8
--- /dev/null
+++ b/test_cases/q2/1-7-minmax.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/1-7-minmax.test.
+action: "Left"
+generated: "I J K L M N O P Z a b1 b2 c3 c4 cx d5 d6 d7 d8 dx"
diff --git a/test_cases/q2/1-7-minmax.test b/test_cases/q2/1-7-minmax.test
new file mode 100644
index 0000000..534cb39
--- /dev/null
+++ b/test_cases/q2/1-7-minmax.test
@@ -0,0 +1,75 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "4"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ | / \
+ cx c3 c4
+ | / \ / \
+ dx d5 d6 d7 d8
+ | / \ / \ / \ / \
+ Z I J K L M N O P
+ -1.99 -1 -9 4 7 2 5 -3 -2
+
+a - max
+b - min
+c - min
+d - max
+
+Note that the minimax value of b2 is -2.
+"""
+num_agents: "3"
+
+start_state: "a"
+win_states: "A B C D E F G H I J K L M N O P Z"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Down cx
+b2 Left c3
+b2 Right c4
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+d1 Left A
+d1 Right B
+d2 Left C
+d2 Right D
+d3 Left E
+d3 Right F
+d4 Left G
+d4 Right H
+d5 Left I
+d5 Right J
+d6 Left K
+d6 Right L
+d7 Left M
+d7 Right N
+d8 Left O
+d8 Right P
+dx Down Z
+"""
+
+evaluation: """
+I -1.0
+J -9.0
+K 4.0
+L 7.0
+M 2.0
+N 5.0
+O -3.0
+P -2.0
+Z -1.99
+"""
diff --git a/test_cases/q2/1-8-minmax.solution b/test_cases/q2/1-8-minmax.solution
new file mode 100644
index 0000000..80c7ebe
--- /dev/null
+++ b/test_cases/q2/1-8-minmax.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/1-8-minmax.test.
+action: "Right"
+generated: "I J K L M N O P Z a b1 b2 c3 c4 cx d5 d6 d7 d8 dx"
diff --git a/test_cases/q2/1-8-minmax.test b/test_cases/q2/1-8-minmax.test
new file mode 100644
index 0000000..7d3ead9
--- /dev/null
+++ b/test_cases/q2/1-8-minmax.test
@@ -0,0 +1,75 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "4"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ | / \
+ cx c3 c4
+ | / \ / \
+ dx d5 d6 d7 d8
+ | / \ / \ / \ / \
+ Z I J K L M N O P
+ -2.01 -1 -9 4 7 2 5 -3 -2
+
+a - max
+b - min
+c - min
+d - max
+
+Note that the minimax value of b2 is still -2; the dx leaf makes b1 worth -2.01.
+"""
+num_agents: "3"
+
+start_state: "a"
+win_states: "A B C D E F G H I J K L M N O P Z"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Down cx
+b2 Left c3
+b2 Right c4
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+d1 Left A
+d1 Right B
+d2 Left C
+d2 Right D
+d3 Left E
+d3 Right F
+d4 Left G
+d4 Right H
+d5 Left I
+d5 Right J
+d6 Left K
+d6 Right L
+d7 Left M
+d7 Right N
+d8 Left O
+d8 Right P
+dx Down Z
+"""
+
+evaluation: """
+I -1.0
+J -9.0
+K 4.0
+L 7.0
+M 2.0
+N 5.0
+O -3.0
+P -2.0
+Z -2.01
+"""
diff --git a/test_cases/q2/2-1a-vary-depth.solution b/test_cases/q2/2-1a-vary-depth.solution
new file mode 100644
index 0000000..9dd1708
--- /dev/null
+++ b/test_cases/q2/2-1a-vary-depth.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/2-1a-vary-depth.test.
+action: "Left"
+generated: "a b1 b2 c1 c2 cx"
diff --git a/test_cases/q2/2-1a-vary-depth.test b/test_cases/q2/2-1a-vary-depth.test
new file mode 100644
index 0000000..321b1ce
--- /dev/null
+++ b/test_cases/q2/2-1a-vary-depth.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "1"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ |
+-4 c1 c2 9 cx -4.01
+ / \ / \ |
+ d1 d2 d3 d4 dx
+-3 -9 10 6 -4.01
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b1 is -3, but the depth=1 limited value is -4.
+The values next to c1, c2, and cx are the values of the evaluation function, not
+necessarily the correct minimax backup.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Down cx
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+cx Down dx
+"""
+
+evaluation: """
+c1 -4.0
+c2 9.0
+cx -4.01
+d1 -3.0
+d2 -9.0
+d3 10.0
+d4 6.0
+dx -4.01
+"""
diff --git a/test_cases/q2/2-1b-vary-depth.solution b/test_cases/q2/2-1b-vary-depth.solution
new file mode 100644
index 0000000..a824e44
--- /dev/null
+++ b/test_cases/q2/2-1b-vary-depth.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/2-1b-vary-depth.test.
+action: "Left"
+generated: "a b1 b2 c1 c2 cx d1 d2 d3 d4 dx"
diff --git a/test_cases/q2/2-1b-vary-depth.test b/test_cases/q2/2-1b-vary-depth.test
new file mode 100644
index 0000000..d0d656d
--- /dev/null
+++ b/test_cases/q2/2-1b-vary-depth.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "2"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ |
+-4 c1 c2 9 cx -4.01
+ / \ / \ |
+ d1 d2 d3 d4 dx
+-3 -9 10 6 -4.01
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b1 is -3, but the depth=1 limited value is -4.
+The values next to c1, c2, and cx are the values of the evaluation function, not
+necessarily the correct minimax backup.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Down cx
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+cx Down dx
+"""
+
+evaluation: """
+c1 -4.0
+c2 9.0
+cx -4.01
+d1 -3.0
+d2 -9.0
+d3 10.0
+d4 6.0
+dx -4.01
+"""
diff --git a/test_cases/q2/2-2a-vary-depth.solution b/test_cases/q2/2-2a-vary-depth.solution
new file mode 100644
index 0000000..b4b0e98
--- /dev/null
+++ b/test_cases/q2/2-2a-vary-depth.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/2-2a-vary-depth.test.
+action: "Right"
+generated: "a b1 b2 c1 c2 cx"
diff --git a/test_cases/q2/2-2a-vary-depth.test b/test_cases/q2/2-2a-vary-depth.test
new file mode 100644
index 0000000..204877f
--- /dev/null
+++ b/test_cases/q2/2-2a-vary-depth.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "1"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ |
+-4 c1 c2 9 cx -3.99
+ / \ / \ |
+ d1 d2 d3 d4 dx
+-3 -9 10 6 -3.99
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b1 is -3, but the depth=1 limited value is -4.
+The values next to c1, c2, and cx are the values of the evaluation function, not
+necessarily the correct minimax backup.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Down cx
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+cx Down dx
+"""
+
+evaluation: """
+c1 -4.0
+c2 9.0
+cx -3.99
+d1 -3.0
+d2 -9.0
+d3 10.0
+d4 6.0
+dx -3.99
+"""
diff --git a/test_cases/q2/2-2b-vary-depth.solution b/test_cases/q2/2-2b-vary-depth.solution
new file mode 100644
index 0000000..94e2089
--- /dev/null
+++ b/test_cases/q2/2-2b-vary-depth.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/2-2b-vary-depth.test.
+action: "Left"
+generated: "a b1 b2 c1 c2 cx d1 d2 d3 d4 dx"
diff --git a/test_cases/q2/2-2b-vary-depth.test b/test_cases/q2/2-2b-vary-depth.test
new file mode 100644
index 0000000..1e4ef41
--- /dev/null
+++ b/test_cases/q2/2-2b-vary-depth.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "2"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ |
+-4 c1 c2 9 cx -3.99
+ / \ / \ |
+ d1 d2 d3 d4 dx
+-3 -9 10 6 -3.99
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b1 is -3, but the depth=1 limited value is -4.
+The values next to c1, c2, and cx are the values of the evaluation function, not
+necessarily the correct minimax backup.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Down cx
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+cx Down dx
+"""
+
+evaluation: """
+c1 -4.0
+c2 9.0
+cx -3.99
+d1 -3.0
+d2 -9.0
+d3 10.0
+d4 6.0
+dx -3.99
+"""
diff --git a/test_cases/q2/2-3a-vary-depth.solution b/test_cases/q2/2-3a-vary-depth.solution
new file mode 100644
index 0000000..1bab661
--- /dev/null
+++ b/test_cases/q2/2-3a-vary-depth.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/2-3a-vary-depth.test.
+action: "Left"
+generated: "a b1 b2 c3 c4 cx"
diff --git a/test_cases/q2/2-3a-vary-depth.test b/test_cases/q2/2-3a-vary-depth.test
new file mode 100644
index 0000000..e083759
--- /dev/null
+++ b/test_cases/q2/2-3a-vary-depth.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "1"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ | / \
+ 5.01 cx 8 c3 c4 5
+ | / \ / \
+ dx d5 d6 d7 d8
+ 5.01 4 -7 0 5
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b2 is 4, but the depth=1 limited value is 5.
+The values next to c3, c4, and cx are the values of the evaluation function, not
+necessarily the correct minimax backup.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Down cx
+b2 Left c3
+b2 Right c4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+"""
+
+evaluation: """
+c3 8.0
+c4 5.0
+cx 5.01
+d5 4.0
+d6 -7.0
+d7 0.0
+d8 5.0
+dx 5.01
+"""
diff --git a/test_cases/q2/2-3b-vary-depth.solution b/test_cases/q2/2-3b-vary-depth.solution
new file mode 100644
index 0000000..fb389ec
--- /dev/null
+++ b/test_cases/q2/2-3b-vary-depth.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/2-3b-vary-depth.test.
+action: "Left"
+generated: "a b1 b2 c3 c4 cx d5 d6 d7 d8 dx"
diff --git a/test_cases/q2/2-3b-vary-depth.test b/test_cases/q2/2-3b-vary-depth.test
new file mode 100644
index 0000000..cbe88b5
--- /dev/null
+++ b/test_cases/q2/2-3b-vary-depth.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "2"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ | / \
+ 5.01 cx 8 c3 c4 5
+ | / \ / \
+ dx d5 d6 d7 d8
+ 5.01 4 -7 0 5
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b2 is 4, but the depth=1 limited value is 5.
+The values next to c3, c4, and cx are the values of the evaluation function, not
+necessarily the correct minimax backup.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Down cx
+b2 Left c3
+b2 Right c4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+"""
+
+evaluation: """
+c3 8.0
+c4 5.0
+cx 5.01
+d5 4.0
+d6 -7.0
+d7 0.0
+d8 5.0
+dx 5.01
+"""
diff --git a/test_cases/q2/2-4a-vary-depth.solution b/test_cases/q2/2-4a-vary-depth.solution
new file mode 100644
index 0000000..815147d
--- /dev/null
+++ b/test_cases/q2/2-4a-vary-depth.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/2-4a-vary-depth.test.
+action: "Right"
+generated: "a b1 b2 c3 c4 cx"
diff --git a/test_cases/q2/2-4a-vary-depth.test b/test_cases/q2/2-4a-vary-depth.test
new file mode 100644
index 0000000..e519969
--- /dev/null
+++ b/test_cases/q2/2-4a-vary-depth.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "1"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ | / \
+ 4.99 cx 8 c3 c4 5
+ | / \ / \
+ dx d5 d6 d7 d8
+ 4.99 4 -7 0 5
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b2 is 4, but the depth=1 limited value is 5.
+The values next to c3, c4, and cx are the values of the evaluation function, not
+necessarily the correct minimax backup.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Down cx
+b2 Left c3
+b2 Right c4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+"""
+
+evaluation: """
+c3 8.0
+c4 5.0
+cx 4.99
+d5 4.0
+d6 -7.0
+d7 0.0
+d8 5.0
+dx 4.99
+"""
diff --git a/test_cases/q2/2-4b-vary-depth.solution b/test_cases/q2/2-4b-vary-depth.solution
new file mode 100644
index 0000000..3b12519
--- /dev/null
+++ b/test_cases/q2/2-4b-vary-depth.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/2-4b-vary-depth.test.
+action: "Left"
+generated: "a b1 b2 c3 c4 cx d5 d6 d7 d8 dx"
diff --git a/test_cases/q2/2-4b-vary-depth.test b/test_cases/q2/2-4b-vary-depth.test
new file mode 100644
index 0000000..a80e82d
--- /dev/null
+++ b/test_cases/q2/2-4b-vary-depth.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "2"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ | / \
+ 4.99 cx 8 c3 c4 5
+ | / \ / \
+ dx d5 d6 d7 d8
+ 4.99 4 -7 0 5
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b2 is 4, but the depth=1 limited value is 5.
+The values next to c3, c4, and cx are the values of the evaluation function, not
+necessarily the correct minimax backup.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Down cx
+b2 Left c3
+b2 Right c4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+"""
+
+evaluation: """
+c3 8.0
+c4 5.0
+cx 4.99
+d5 4.0
+d6 -7.0
+d7 0.0
+d8 5.0
+dx 4.99
+"""
diff --git a/test_cases/q2/2-one-ghost-3level.solution b/test_cases/q2/2-one-ghost-3level.solution
new file mode 100644
index 0000000..f7b10fb
--- /dev/null
+++ b/test_cases/q2/2-one-ghost-3level.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/2-one-ghost-3level.test.
+action: "Left"
+generated: "a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7 d8"
diff --git a/test_cases/q2/2-one-ghost-3level.test b/test_cases/q2/2-one-ghost-3level.test
new file mode 100644
index 0000000..90d4685
--- /dev/null
+++ b/test_cases/q2/2-one-ghost-3level.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "3"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ / \
+ c1 c2 c3 c4
+ / \ / \ / \ / \
+ d1 d2 d3 d4 d5 d6 d7 d8
+ 3 9 10 6 4 7 0 5
+
+a - max
+b - min
+c - max
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Left c3
+b2 Right c4
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+"""
+
+evaluation: """
+d1 3.0
+d2 9.0
+d3 10.0
+d4 6.0
+d5 4.0
+d6 7.0
+d7 0.0
+d8 5.0
+"""
diff --git a/test_cases/q2/3-one-ghost-4level.solution b/test_cases/q2/3-one-ghost-4level.solution
new file mode 100644
index 0000000..5fcb2c5
--- /dev/null
+++ b/test_cases/q2/3-one-ghost-4level.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/3-one-ghost-4level.test.
+action: "Left"
+generated: "A B C D E F G H I J K L M N O P a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7 d8"
diff --git a/test_cases/q2/3-one-ghost-4level.test b/test_cases/q2/3-one-ghost-4level.test
new file mode 100644
index 0000000..6868528
--- /dev/null
+++ b/test_cases/q2/3-one-ghost-4level.test
@@ -0,0 +1,79 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "4"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ / \
+ c1 c2 c3 c4
+ / \ / \ / \ / \
+ d1 d2 d3 d4 d5 d6 d7 d8
+/ \ / \ / \ / \ / \ / \ / \ / \
+A B C D E F G H I J K L M N O P
+3 13 5 9 10 11 6 8 1 0 4 7 12 15 2 14
+
+a - max
+b - min
+c - max
+d - min
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "A B C D E F G H I J K L M N O P"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Left c3
+b2 Right c4
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+d1 Left A
+d1 Right B
+d2 Left C
+d2 Right D
+d3 Left E
+d3 Right F
+d4 Left G
+d4 Right H
+d5 Left I
+d5 Right J
+d6 Left K
+d6 Right L
+d7 Left M
+d7 Right N
+d8 Left O
+d8 Right P
+"""
+
+evaluation: """
+A 3.0
+B 13.0
+C 5.0
+D 9.0
+E 10.0
+F 11.0
+G 6.0
+H 8.0
+I 1.0
+J 0.0
+K 4.0
+L 7.0
+M 12.0
+N 15.0
+O 2.0
+P 14.0
+"""
diff --git a/test_cases/q2/4-two-ghosts-3level.solution b/test_cases/q2/4-two-ghosts-3level.solution
new file mode 100644
index 0000000..36a2326
--- /dev/null
+++ b/test_cases/q2/4-two-ghosts-3level.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/4-two-ghosts-3level.test.
+action: "Left"
+generated: "a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7 d8"
diff --git a/test_cases/q2/4-two-ghosts-3level.test b/test_cases/q2/4-two-ghosts-3level.test
new file mode 100644
index 0000000..a2c5bad
--- /dev/null
+++ b/test_cases/q2/4-two-ghosts-3level.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "3"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ / \
+ c1 c2 c3 c4
+ / \ / \ / \ / \
+ d1 d2 d3 d4 d5 d6 d7 d8
+ 3 9 10 6 4 7 0 5
+
+a - max
+b - min
+c - min
+"""
+num_agents: "3"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Left c3
+b2 Right c4
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+"""
+
+evaluation: """
+d1 3.0
+d2 9.0
+d3 10.0
+d4 6.0
+d5 4.0
+d6 7.0
+d7 0.0
+d8 5.0
+"""
diff --git a/test_cases/q2/5-two-ghosts-4level.solution b/test_cases/q2/5-two-ghosts-4level.solution
new file mode 100644
index 0000000..3164ec0
--- /dev/null
+++ b/test_cases/q2/5-two-ghosts-4level.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/5-two-ghosts-4level.test.
+action: "Left"
+generated: "A B C D E F G H I J K L M N O P a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7 d8"
diff --git a/test_cases/q2/5-two-ghosts-4level.test b/test_cases/q2/5-two-ghosts-4level.test
new file mode 100644
index 0000000..9eeb228
--- /dev/null
+++ b/test_cases/q2/5-two-ghosts-4level.test
@@ -0,0 +1,79 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "4"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ / \
+ c1 c2 c3 c4
+ / \ / \ / \ / \
+ d1 d2 d3 d4 d5 d6 d7 d8
+/ \ / \ / \ / \ / \ / \ / \ / \
+A B C D E F G H I J K L M N O P
+3 13 5 9 10 11 6 8 1 0 4 7 12 15 2 14
+
+a - max
+b - min
+c - min
+d - max
+"""
+num_agents: "3"
+
+start_state: "a"
+win_states: "A B C D E F G H I J K L M N O P"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Left c3
+b2 Right c4
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+d1 Left A
+d1 Right B
+d2 Left C
+d2 Right D
+d3 Left E
+d3 Right F
+d4 Left G
+d4 Right H
+d5 Left I
+d5 Right J
+d6 Left K
+d6 Right L
+d7 Left M
+d7 Right N
+d8 Left O
+d8 Right P
+"""
+
+evaluation: """
+A 3.0
+B 13.0
+C 5.0
+D 9.0
+E 10.0
+F 11.0
+G 6.0
+H 8.0
+I 1.0
+J 0.0
+K 4.0
+L 7.0
+M 12.0
+N 15.0
+O 2.0
+P 14.0
+"""
diff --git a/test_cases/q2/6-tied-root.solution b/test_cases/q2/6-tied-root.solution
new file mode 100644
index 0000000..61918b0
--- /dev/null
+++ b/test_cases/q2/6-tied-root.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/6-tied-root.test.
+action: "Left"
+generated: "A B C max min1 min2"
diff --git a/test_cases/q2/6-tied-root.test b/test_cases/q2/6-tied-root.test
new file mode 100644
index 0000000..98cc012
--- /dev/null
+++ b/test_cases/q2/6-tied-root.test
@@ -0,0 +1,31 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "3"
+
+diagram: """
+ max
+ / \
+min1 min2
+ | / \
+ A B C
+10 10 0
+"""
+num_agents: "2"
+
+start_state: "max"
+win_states: "A B"
+lose_states: "C"
+
+successors: """
+max Left min1
+max Right min2
+min1 Down A
+min2 Left B
+min2 Right C
+"""
+
+evaluation: """
+A 10.0
+B 10.0
+C 0.0
+"""
diff --git a/test_cases/q2/7-1a-check-depth-one-ghost.solution b/test_cases/q2/7-1a-check-depth-one-ghost.solution
new file mode 100644
index 0000000..0b7f01b
--- /dev/null
+++ b/test_cases/q2/7-1a-check-depth-one-ghost.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/7-1a-check-depth-one-ghost.test.
+action: "Left"
+generated: "a b1 b2 b3 c1 c2 c3"
diff --git a/test_cases/q2/7-1a-check-depth-one-ghost.test b/test_cases/q2/7-1a-check-depth-one-ghost.test
new file mode 100644
index 0000000..077effc
--- /dev/null
+++ b/test_cases/q2/7-1a-check-depth-one-ghost.test
@@ -0,0 +1,83 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "1"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 10 c1 0 c2 c3 8
+ | | |
+ 0 d1 0 d2 d3 8
+ | | |
+ 0 e1 10 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ g1 g2 g3
+ 0 0 8
+
+a - max
+b - min
+c - max
+d - min
+e - max
+f - min
+
+At depth 1, the evaluation function is called at level c,
+so Left should be returned. If your algorithm is returning a
+different action, check how you implemented your depth.
+"""
+
+num_agents: "2"
+
+start_state: "a"
+win_states: "g1 g2 g3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 10.0
+c2 0.0
+c3 8.0
+d1 0.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 10.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 0.0
+g3 8.0
+"""
+
+
diff --git a/test_cases/q2/7-1b-check-depth-one-ghost.solution b/test_cases/q2/7-1b-check-depth-one-ghost.solution
new file mode 100644
index 0000000..e7e3c6f
--- /dev/null
+++ b/test_cases/q2/7-1b-check-depth-one-ghost.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/7-1b-check-depth-one-ghost.test.
+action: "Center"
+generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3"
diff --git a/test_cases/q2/7-1b-check-depth-one-ghost.test b/test_cases/q2/7-1b-check-depth-one-ghost.test
new file mode 100644
index 0000000..53cefd4
--- /dev/null
+++ b/test_cases/q2/7-1b-check-depth-one-ghost.test
@@ -0,0 +1,83 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "2"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 10 c1 0 c2 c3 8
+ | | |
+ 0 d1 0 d2 d3 8
+ | | |
+ 0 e1 10 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ g1 g2 g3
+ 0 0 8
+
+a - max
+b - min
+c - max
+d - min
+e - max
+f - min
+
+At depth 2, the evaluation function is called at level e,
+so Center should be returned. If your algorithm is returning a
+different action, check how you implemented your depth.
+"""
+
+num_agents: "2"
+
+start_state: "a"
+win_states: "g1 g2 g3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 10.0
+c2 0.0
+c3 8.0
+d1 0.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 10.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 0.0
+g3 8.0
+"""
+
+
diff --git a/test_cases/q2/7-1c-check-depth-one-ghost.solution b/test_cases/q2/7-1c-check-depth-one-ghost.solution
new file mode 100644
index 0000000..188854d
--- /dev/null
+++ b/test_cases/q2/7-1c-check-depth-one-ghost.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/7-1c-check-depth-one-ghost.test.
+action: "Right"
+generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3"
diff --git a/test_cases/q2/7-1c-check-depth-one-ghost.test b/test_cases/q2/7-1c-check-depth-one-ghost.test
new file mode 100644
index 0000000..51a5c9a
--- /dev/null
+++ b/test_cases/q2/7-1c-check-depth-one-ghost.test
@@ -0,0 +1,83 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "3"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 10 c1 0 c2 c3 8
+ | | |
+ 0 d1 0 d2 d3 8
+ | | |
+ 0 e1 10 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ g1 g2 g3
+ 0 0 8
+
+a - max
+b - min
+c - max
+d - min
+e - max
+f - min
+
+At depth 3, the evaluation function is called at level g,
+so Right should be returned. If your algorithm is returning a
+different action, check how you implemented your depth.
+"""
+
+num_agents: "2"
+
+start_state: "a"
+win_states: "g1 g2 g3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 10.0
+c2 0.0
+c3 8.0
+d1 0.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 10.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 0.0
+g3 8.0
+"""
+
+
diff --git a/test_cases/q2/7-2a-check-depth-two-ghosts.solution b/test_cases/q2/7-2a-check-depth-two-ghosts.solution
new file mode 100644
index 0000000..649825a
--- /dev/null
+++ b/test_cases/q2/7-2a-check-depth-two-ghosts.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/7-2a-check-depth-two-ghosts.test.
+action: "Left"
+generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3"
diff --git a/test_cases/q2/7-2a-check-depth-two-ghosts.test b/test_cases/q2/7-2a-check-depth-two-ghosts.test
new file mode 100644
index 0000000..c8fc743
--- /dev/null
+++ b/test_cases/q2/7-2a-check-depth-two-ghosts.test
@@ -0,0 +1,110 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "1"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 0 c1 0 c2 c3 8
+ | | |
+ 10 d1 0 d2 d3 8
+ | | |
+ 0 e1 0 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ 0 g1 10 g2 g3 8
+ | | |
+ 0 h1 0 h2 h3 8
+ | | |
+ 0 i1 0 i2 i3 8
+ | | |
+ j1 j2 j3
+ 0 0 8
+
+a - max
+b - min
+c - min
+d - max
+e - min
+f - min
+g - max
+h - min
+i - min
+
+At depth 1, the evaluation function is called at level d,
+so Left should be returned. If your algorithm is returning a
+different action, check how you implemented your depth.
+"""
+
+num_agents: "3"
+
+start_state: "a"
+win_states: "j1 j2 j3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+g1 Center h1
+g2 Center h2
+g3 Center h3
+h1 Center i1
+h2 Center i2
+h3 Center i3
+i1 Center j1
+i2 Center j2
+i3 Center j3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 0.0
+c2 0.0
+c3 8.0
+d1 10.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 0.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 10.0
+g3 8.0
+h1 0.0
+h2 0.0
+h3 8.0
+i1 0.0
+i2 0.0
+i3 8.0
+j1 0.0
+j2 0.0
+j3 8.0
+"""
+
+
diff --git a/test_cases/q2/7-2b-check-depth-two-ghosts.solution b/test_cases/q2/7-2b-check-depth-two-ghosts.solution
new file mode 100644
index 0000000..440eaf8
--- /dev/null
+++ b/test_cases/q2/7-2b-check-depth-two-ghosts.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/7-2b-check-depth-two-ghosts.test.
+action: "Center"
+generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3"
diff --git a/test_cases/q2/7-2b-check-depth-two-ghosts.test b/test_cases/q2/7-2b-check-depth-two-ghosts.test
new file mode 100644
index 0000000..2dff6d0
--- /dev/null
+++ b/test_cases/q2/7-2b-check-depth-two-ghosts.test
@@ -0,0 +1,110 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "2"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 0 c1 0 c2 c3 8
+ | | |
+ 10 d1 0 d2 d3 8
+ | | |
+ 0 e1 0 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ 0 g1 10 g2 g3 8
+ | | |
+ 0 h1 0 h2 h3 8
+ | | |
+ 0 i1 0 i2 i3 8
+ | | |
+ j1 j2 j3
+ 0 0 8
+
+a - max
+b - min
+c - min
+d - max
+e - min
+f - min
+g - max
+h - min
+i - min
+
+At depth 2, the evaluation function is called at level g,
+so Center should be returned. If your algorithm is returning
+a different action, check how you implemented your depth.
+"""
+
+num_agents: "3"
+
+start_state: "a"
+win_states: "j1 j2 j3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+g1 Center h1
+g2 Center h2
+g3 Center h3
+h1 Center i1
+h2 Center i2
+h3 Center i3
+i1 Center j1
+i2 Center j2
+i3 Center j3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 0.0
+c2 0.0
+c3 8.0
+d1 10.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 0.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 10.0
+g3 8.0
+h1 0.0
+h2 0.0
+h3 8.0
+i1 0.0
+i2 0.0
+i3 8.0
+j1 0.0
+j2 0.0
+j3 8.0
+"""
+
+
diff --git a/test_cases/q2/7-2c-check-depth-two-ghosts.solution b/test_cases/q2/7-2c-check-depth-two-ghosts.solution
new file mode 100644
index 0000000..fb146d8
--- /dev/null
+++ b/test_cases/q2/7-2c-check-depth-two-ghosts.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q2/7-2c-check-depth-two-ghosts.test.
+action: "Right"
+generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3 h1 h2 h3 i1 i2 i3 j1 j2 j3"
diff --git a/test_cases/q2/7-2c-check-depth-two-ghosts.test b/test_cases/q2/7-2c-check-depth-two-ghosts.test
new file mode 100644
index 0000000..2ef9f15
--- /dev/null
+++ b/test_cases/q2/7-2c-check-depth-two-ghosts.test
@@ -0,0 +1,110 @@
+class: "GraphGameTreeTest"
+alg: "MinimaxAgent"
+depth: "3"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 0 c1 0 c2 c3 8
+ | | |
+ 10 d1 0 d2 d3 8
+ | | |
+ 0 e1 0 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ 0 g1 10 g2 g3 8
+ | | |
+ 0 h1 0 h2 h3 8
+ | | |
+ 0 i1 0 i2 i3 8
+ | | |
+ j1 j2 j3
+ 0 0 8
+
+a - max
+b - min
+c - min
+d - max
+e - min
+f - min
+g - max
+h - min
+i - min
+
+At depth 3, the evaluation function is called at level j,
+so Right should be returned. If your algorithm is returning
+a different action, check how you implemented your depth.
+"""
+
+num_agents: "3"
+
+start_state: "a"
+win_states: "j1 j2 j3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+g1 Center h1
+g2 Center h2
+g3 Center h3
+h1 Center i1
+h2 Center i2
+h3 Center i3
+i1 Center j1
+i2 Center j2
+i3 Center j3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 0.0
+c2 0.0
+c3 8.0
+d1 10.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 0.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 10.0
+g3 8.0
+h1 0.0
+h2 0.0
+h3 8.0
+i1 0.0
+i2 0.0
+i3 8.0
+j1 0.0
+j2 0.0
+j3 8.0
+"""
+
+
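
The 7-x tests above all probe the same bookkeeping: one unit of search depth is one full round of moves, Pacman plus every ghost, so the evaluation function fires depth * num_agents levels below the root. A minimal sketch of that relationship, with level labels taken from the diagrams (the helper is illustrative only):

levels = "abcdefghij"  # root 'a' at index 0, as in the diagrams above

def cutoff_level(depth, num_agents):
    return levels[depth * num_agents]

print(cutoff_level(1, 2))  # 'c' -> 7-1a expects Left
print(cutoff_level(2, 2))  # 'e' -> 7-1b expects Center
print(cutoff_level(3, 2))  # 'g' -> 7-1c expects Right
print(cutoff_level(1, 3))  # 'd' -> 7-2a expects Left
print(cutoff_level(2, 3))  # 'g' -> 7-2b expects Center
print(cutoff_level(3, 3))  # 'j' -> 7-2c expects Right
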
diff --git a/test_cases/q2/8-pacman-game.solution b/test_cases/q2/8-pacman-game.solution
new file mode 100644
index 0000000..4ee19d8
--- /dev/null
+++ b/test_cases/q2/8-pacman-game.solution
@@ -0,0 +1,444 @@
+optimalActions: """
+[[["West", "East"], 59], [["West", "East"], 35]]
+[[["West"], 190], [["West"], 127]]
+[[["West"], 190], [["West"], 135]]
+[[["West", "North"], 120], [["West", "North"], 82]]
+[[["West"], 77], [["West"], 57]]
+[[["West", "North"], 143], [["West", "North"], 97]]
+[[["West"], 155], [["West"], 110]]
+[[["West"], 40], [["West"], 27]]
+[[["North"], 64], [["North"], 43]]
+[[["North"], 85], [["North"], 57]]
+[[["North"], 106], [["North"], 71]]
+[[["North"], 97], [["North"], 65]]
+[[["Stop", "East"], 154], [["East"], 103]]
+[[["East"], 156], [["East"], 101]]
+[[["West"], 30], [["West"], 17]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 18], [["East"], 12]]
+[[["North"], 29], [["North"], 18]]
+[[["North"], 50], [["North"], 31]]
+[[["West"], 55], [["West"], 36]]
+[[["East"], 29], [["East"], 16]]
+[[["North"], 89], [["North"], 61]]
+[[["East", "North"], 161], [["East", "North"], 121]]
+[[["East", "North"], 221], [["East", "North"], 166]]
+[[["North", "South"], 105], [["North", "South"], 77]]
+[[["West"], 69], [["West"], 51]]
+[[["West"], 94], [["West"], 69]]
+[[["West", "Stop"], 57], [["West"], 42]]
+[[["West", "Stop", "East"], 69], [["West", "East"], 49]]
+[[["West", "Stop", "East"], 61], [["West", "East"], 41]]
+[[["Stop", "East", "South"], 55], [["East", "South"], 37]]
+[[["Stop", "East", "South"], 28], [["East", "South"], 19]]
+[[["Stop", "East", "South"], 34], [["East", "South"], 23]]
+[[["Stop", "East", "South"], 55], [["East", "South"], 37]]
+[[["Stop", "East", "South"], 55], [["East", "South"], 37]]
+[[["Stop", "East", "South"], 61], [["East", "South"], 41]]
+[[["Stop", "East", "South"], 85], [["East", "South"], 57]]
+[[["Stop", "East", "South"], 64], [["East", "South"], 43]]
+[[["Stop", "East", "South"], 61], [["East", "South"], 41]]
+[[["Stop", "East", "South"], 61], [["East", "South"], 41]]
+[[["Stop", "East", "South"], 85], [["East", "South"], 57]]
+[[["Stop", "East", "South"], 102], [["East", "South"], 67]]
+[[["Stop", "South"], 23], [["South"], 13]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 18], [["East"], 12]]
+[[["East", "North"], 29], [["East", "North"], 18]]
+[[["East"], 38], [["East"], 22]]
+[[["North"], 29], [["North"], 18]]
+[[["North"], 38], [["North"], 22]]
+[[["East"], 33], [["East"], 22]]
+[[["East"], 37], [["East"], 18]]
+[[["East"], 18], [["East"], 12]]
+[[["East"], 37], [["East"], 26]]
+[[["East"], 69], [["East"], 41]]
+[[["East"], 56], [["East"], 26]]
+[[["East"], 44], [["East"], 29]]
+[[["North", "South"], 83], [["North", "South"], 52]]
+[[["East", "North"], 121], [["East", "North"], 74]]
+[[["East", "North"], 97], [["East", "North"], 73]]
+[[["North", "South"], 173], [["North", "South"], 130]]
+[[["West", "East"], 90], [["West", "East"], 66]]
+[[["West", "Stop", "East"], 161], [["West", "East"], 118]]
+[[["Stop", "East", "South"], 58], [["East", "South"], 43]]
+[[["Stop", "East"], 120], [["South"], 85]]
+[[["East"], 78], [["East"], 45]]
+[[["West"], 77], [["West"], 42]]
+[[["South"], 83], [["South"], 48]]
+[[["South"], 49], [["South"], 37]]
+[[["South"], 185], [["South"], 104]]
+[[["South"], 68], [["South"], 41]]
+[[["West"], 30], [["West"], 18]]
+[[["West"], 56], [["West"], 29]]
+[[["West"], 14], [["West"], 10]]
+[[["West"], 20], [["West"], 14]]
+[[["West"], 13], [["West"], 9]]
+[[["West"], 13], [["West"], 9]]
+[[["West"], 16], [["West"], 12]]
+[[["West", "North"], 30], [["West", "North"], 20]]
+[[["West"], 38], [["West"], 23]]
+[[["West", "Stop", "East", "North"], 70], [["West", "East", "North"], 46]]
+[[["West", "Stop", "East"], 128], [["West", "East"], 89]]
+[[["West", "Stop", "East"], 31], [["West", "East"], 20]]
+[[["Stop", "East", "North"], 69], [["East", "North"], 45]]
+[[["Stop", "North"], 58], [["North"], 31]]
+[[["North"], 34], [["North"], 19]]
+[[["North"], 30], [["North"], 17]]
+[[["North"], 19], [["North"], 11]]
+[[["North"], 34], [["North"], 19]]
+[[["East"], 30], [["East"], 17]]
+[[["East"], 19], [["East"], 11]]
+[[["East"], 44], [["East"], 29]]
+[[["East", "South"], 87], [["East", "South"], 60]]
+[[["East", "South"], 108], [["East", "South"], 62]]
+[[["South"], 120], [["South"], 61]]
+[[["North", "South"], 209], [["North", "South"], 132]]
+[[["West"], 108], [["West"], 60]]
+[[["West", "Stop", "East", "South"], 83], [["West", "East", "South"], 61]]
+[[["West", "Stop", "East", "South"], 90], [["West", "East", "South"], 66]]
+[[["West", "Stop", "East"], 134], [["West", "East"], 95]]
+[[["West", "Stop", "East"], 82], [["West", "East"], 55]]
+[[["Stop", "East", "South"], 142], [["East", "South"], 95]]
+[[["Stop", "East", "South"], 98], [["East", "South"], 65]]
+[[["Stop", "East", "South"], 128], [["East", "South"], 86]]
+[[["Stop", "East", "South"], 82], [["East", "South"], 55]]
+[[["Stop", "East", "South"], 85], [["East", "South"], 57]]
+[[["Stop", "East", "South"], 190], [["East", "South"], 127]]
+[[["Stop", "East", "South"], 158], [["East", "South"], 103]]
+[[["Stop", "South"], 50], [["South"], 27]]
+[[["South"], 30], [["South"], 17]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 18], [["East"], 12]]
+[[["East", "North"], 29], [["East", "North"], 18]]
+[[["East"], 37], [["East"], 22]]
+[[["East", "North"], 41], [["East", "North"], 24]]
+[[["East"], 59], [["East"], 29]]
+[[["East"], 19], [["East"], 11]]
+[[["East"], 26], [["East"], 15]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 18], [["East"], 12]]
+[[["East"], 29], [["East"], 18]]
+[[["East"], 37], [["East"], 22]]
+[[["East", "North"], 41], [["East", "North"], 24]]
+[[["East"], 59], [["East"], 29]]
+[[["East"], 19], [["East"], 11]]
+[[["North"], 26], [["North"], 15]]
+[[["North"], 19], [["North"], 11]]
+[[["North"], 30], [["North"], 17]]
+[[["North"], 34], [["North"], 19]]
+[[["West"], 34], [["West"], 19]]
+[[["West"], 25], [["West"], 13]]
+[[["West", "Stop", "East"], 7], [["West", "East"], 3]]
+"""
+altDepthActions: """
+[["West", "East"], ["West", "East"], ["West", "East"], ["West", "East"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["Stop", "North"], ["North"]]
+[["East"], ["East"], ["Stop", "East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["West"], ["West"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["North", "South"], ["North", "South"], ["North"], ["North"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West", "Stop"], ["West"]]
+[["West"], ["West"], ["West", "Stop", "East", "South"], ["West", "East", "South"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "South"], ["South"], ["Stop", "South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East", "North"], ["East", "North"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["North", "South"], ["North", "South"], ["South"], ["South"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["North", "South"], ["North", "South"], ["North"], ["North"]]
+[["West", "East"], ["West", "East"], ["East"], ["East"]]
+[["West"], ["West"], ["East"], ["East"]]
+[["Stop", "East", "South"], ["East", "South"], ["East"], ["East"]]
+[["Stop", "East"], ["East"], ["Stop", "East"], ["South"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["West", "East"], ["West", "East"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "Stop", "East", "North"], ["West", "East", "North"], ["West", "Stop", "East", "North"], ["West", "East", "North"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "North"], ["East", "North"], ["Stop", "East", "North"], ["East", "North"]]
+[["Stop", "North"], ["North"], ["Stop", "North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East", "South"], ["East", "South"], ["East", "South"], ["East", "South"]]
+[["East", "South"], ["East", "South"], ["East", "South"], ["East", "South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["North", "South"], ["North", "South"], ["North", "South"], ["North", "South"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "Stop", "East", "South"], ["West", "East", "South"], ["West", "Stop", "East", "South"], ["West", "East", "South"]]
+[["West", "Stop", "East", "South"], ["West", "East", "South"], ["West", "Stop", "East", "South"], ["West", "East", "South"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "South"], ["South"], ["Stop", "South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East", "North"], ["East", "North"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+"""
+partialPlyBugActions: """
+[["West", "East"], ["West", "East"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West", "North"], ["West", "North"]]
+[["West"], ["West"]]
+[["West", "North"], ["West", "North"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["East"], ["East"]]
+[["Stop", "East"], ["East"]]
+[["West"], ["West"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["West"], ["West"]]
+[["East"], ["East"]]
+[["North"], ["North"]]
+[["East", "North"], ["East", "North"]]
+[["East", "North"], ["East", "North"]]
+[["North", "South"], ["North", "South"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West", "Stop"], ["West"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East", "North"], ["East", "North"]]
+[["East"], ["East"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["North", "South"], ["North", "South"]]
+[["East", "North"], ["East", "North"]]
+[["East", "North"], ["East", "North"]]
+[["North", "South"], ["North", "South"]]
+[["West", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East"], ["East"]]
+[["East"], ["East"]]
+[["West"], ["West"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West", "North"], ["West", "North"]]
+[["West"], ["West"]]
+[["West", "Stop", "East", "North"], ["West", "East", "North"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "North"], ["East", "North"]]
+[["Stop", "North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East", "South"], ["East", "South"]]
+[["East", "South"], ["East", "South"]]
+[["South"], ["South"]]
+[["North", "South"], ["North", "South"]]
+[["West"], ["West"]]
+[["West", "Stop", "East", "South"], ["West", "East", "South"]]
+[["West", "Stop", "East", "South"], ["West", "East", "South"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East", "North"], ["East", "North"]]
+[["East"], ["East"]]
+[["East", "North"], ["East", "North"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East", "North"], ["East", "North"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West", "Stop", "East"], ["West", "East"]]
+"""
diff --git a/test_cases/q2/8-pacman-game.test b/test_cases/q2/8-pacman-game.test
new file mode 100644
index 0000000..be7396d
--- /dev/null
+++ b/test_cases/q2/8-pacman-game.test
@@ -0,0 +1,19 @@
+class: "PacmanGameTreeTest"
+alg: "MinimaxAgent"
+seed: "0"
+depth: "2"
+max_points: "4"
+
+# The following specifies the layout to be used
+layoutName: "smallClassic"
+layout: """
+%%%%%%%%%%%%%%%%%%%%
+%......%G G%......%
+%.%%...%% %%...%%.%
+%.%o.%........%.o%.%
+%.%%.%.%%%%%%.%.%%.%
+%........P.........%
+%%%%%%%%%%%%%%%%%%%%
+"""
+
+
diff --git a/test_cases/q2/CONFIG b/test_cases/q2/CONFIG
new file mode 100644
index 0000000..a5adc3f
--- /dev/null
+++ b/test_cases/q2/CONFIG
@@ -0,0 +1,2 @@
+max_points: "5"
+class: "PassAllTestsQuestion"
diff --git a/test_cases/q3/0-lecture-6-tree.solution b/test_cases/q3/0-lecture-6-tree.solution
new file mode 100644
index 0000000..c4a91ad
--- /dev/null
+++ b/test_cases/q3/0-lecture-6-tree.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/0-lecture-6-tree.test.
+action: "Center"
+generated: "A B C D E F G H max min1 min2 min3"
diff --git a/test_cases/q3/0-lecture-6-tree.test b/test_cases/q3/0-lecture-6-tree.test
new file mode 100644
index 0000000..cbc61fe
--- /dev/null
+++ b/test_cases/q3/0-lecture-6-tree.test
@@ -0,0 +1,50 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "2"
+
+# Tree from lecture 6 slides
+diagram: """
+ max
+ /-/ | \--\
+ / | \
+ / | \
+ min1 min2 min3
+ /|\ /|\ /|\
+ / | \ / | \ / | \
+A B C D E F G H I
+3 12 8 5 4 6 14 1 11
+"""
+
+num_agents: "2"
+
+start_state: "max"
+win_states: "A B C D E F G H I"
+lose_states: ""
+
+successors: """
+max Left min1
+max Center min2
+max Right min3
+min1 Left A
+min1 Center B
+min1 Right C
+min2 Left D
+min2 Center E
+min2 Right F
+min3 Left G
+min3 Center H
+min3 Right I
+"""
+
+
+evaluation: """
+A 3.0
+B 12.0
+C 8.0
+D 5.0
+E 4.0
+F 6.0
+G 14.0
+H 1.0
+I 11.0
+"""
diff --git a/test_cases/q3/0-small-tree.solution b/test_cases/q3/0-small-tree.solution
new file mode 100644
index 0000000..e940986
--- /dev/null
+++ b/test_cases/q3/0-small-tree.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/0-small-tree.test.
+action: "pacLeft"
+generated: "A B C minLeft minRight root"
diff --git a/test_cases/q3/0-small-tree.test b/test_cases/q3/0-small-tree.test
new file mode 100644
index 0000000..a9829d0
--- /dev/null
+++ b/test_cases/q3/0-small-tree.test
@@ -0,0 +1,36 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "3"
+
+diagram: """
+ root
+ / \
+ minLeft minRight
+ / \ / \
+ A B C deeper
+ 4 3 2 |
+ D
+ 1000
+"""
+num_agents: "2"
+
+start_state: "root"
+win_states: "A C"
+lose_states: "B D"
+
+successors: """
+root pacLeft minLeft
+root pacRight minRight
+minLeft gLeft A
+minLeft gRight B
+minRight gLeft C
+minRight gRight deeper
+deeper pacLeft D
+"""
+
+evaluation: """
+A 4.0
+B 3.0
+C 2.0
+D 1000.0
+"""
diff --git a/test_cases/q3/1-1-minmax.solution b/test_cases/q3/1-1-minmax.solution
new file mode 100644
index 0000000..2834b29
--- /dev/null
+++ b/test_cases/q3/1-1-minmax.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/1-1-minmax.test.
+action: "Left"
+generated: "a b1 b2 c1 c2 cx d1 d2 d3 dx"
diff --git a/test_cases/q3/1-1-minmax.test b/test_cases/q3/1-1-minmax.test
new file mode 100644
index 0000000..c601316
--- /dev/null
+++ b/test_cases/q3/1-1-minmax.test
@@ -0,0 +1,47 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "3"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ |
+ c1 c2 cx
+ / \ / \ |
+ d1 d2 d3 d4 dx
+-3 -9 10 6 -3.01
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b1 is -3.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Down cx
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+cx Down dx
+"""
+
+evaluation: """
+d1 -3.0
+d2 -9.0
+d3 10.0
+d4 6.0
+dx -3.01
+"""
diff --git a/test_cases/q3/1-2-minmax.solution b/test_cases/q3/1-2-minmax.solution
new file mode 100644
index 0000000..8c17bcf
--- /dev/null
+++ b/test_cases/q3/1-2-minmax.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/1-2-minmax.test.
+action: "Right"
+generated: "a b1 b2 c1 c2 cx d1 d2 d3 dx"
diff --git a/test_cases/q3/1-2-minmax.test b/test_cases/q3/1-2-minmax.test
new file mode 100644
index 0000000..d42a467
--- /dev/null
+++ b/test_cases/q3/1-2-minmax.test
@@ -0,0 +1,47 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "3"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ |
+ c1 c2 cx
+ / \ / \ |
+ d1 d2 d3 d4 dx
+-3 -9 10 6 -2.99
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b1 is -3.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Down cx
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+cx Down dx
+"""
+
+evaluation: """
+d1 -3.0
+d2 -9.0
+d3 10.0
+d4 6.0
+dx -2.99
+"""
diff --git a/test_cases/q3/1-3-minmax.solution b/test_cases/q3/1-3-minmax.solution
new file mode 100644
index 0000000..827cdbd
--- /dev/null
+++ b/test_cases/q3/1-3-minmax.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/1-3-minmax.test.
+action: "Left"
+generated: "a b1 b2 c3 cx d5 d6 dx"
diff --git a/test_cases/q3/1-3-minmax.test b/test_cases/q3/1-3-minmax.test
new file mode 100644
index 0000000..0fc2226
--- /dev/null
+++ b/test_cases/q3/1-3-minmax.test
@@ -0,0 +1,47 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "3"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ | / \
+ cx c3 c4
+ | / \ / \
+ dx d5 d6 d7 d8
+ 4.01 4 -7 0 5
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b2 is 4.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Down cx
+b2 Left c3
+b2 Right c4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+"""
+
+evaluation: """
+d5 4.0
+d6 -7.0
+d7 0.0
+d8 5.0
+dx 4.01
+"""
diff --git a/test_cases/q3/1-4-minmax.solution b/test_cases/q3/1-4-minmax.solution
new file mode 100644
index 0000000..f5570d1
--- /dev/null
+++ b/test_cases/q3/1-4-minmax.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/1-4-minmax.test.
+action: "Right"
+generated: "a b1 b2 c3 c4 cx d5 d6 d7 d8 dx"
diff --git a/test_cases/q3/1-4-minmax.test b/test_cases/q3/1-4-minmax.test
new file mode 100644
index 0000000..d8d02e2
--- /dev/null
+++ b/test_cases/q3/1-4-minmax.test
@@ -0,0 +1,47 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "3"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ | / \
+ cx c3 c4
+ | / \ / \
+ dx d5 d6 d7 d8
+ 3.99 4 -7 0 5
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b2 is 4.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Down cx
+b2 Left c3
+b2 Right c4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+"""
+
+evaluation: """
+d5 4.0
+d6 -7.0
+d7 0.0
+d8 5.0
+dx 3.99
+"""
diff --git a/test_cases/q3/1-5-minmax.solution b/test_cases/q3/1-5-minmax.solution
new file mode 100644
index 0000000..06cebde
--- /dev/null
+++ b/test_cases/q3/1-5-minmax.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/1-5-minmax.test.
+action: "Right"
+generated: "A B C D E F G Z a b1 b2 c1 c2 cx d1 d2 d3 d4 dx"
diff --git a/test_cases/q3/1-5-minmax.test b/test_cases/q3/1-5-minmax.test
new file mode 100644
index 0000000..2d57290
--- /dev/null
+++ b/test_cases/q3/1-5-minmax.test
@@ -0,0 +1,75 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "4"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ |
+ c1 c2 cx
+ / \ / \ |
+ d1 d2 d3 d4 dx
+ / \ / \ / \ / \ |
+ A B C D E F G H Z
+-3 13 5 9 10 3 -6 8 3.01
+
+a - max
+b - min
+c - max
+d - min
+
+Note the minimax value of b1 is 3.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "A B C D E F G H I J K L M N O P Z"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Down cx
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+d1 Left A
+d1 Right B
+d2 Left C
+d2 Right D
+d3 Left E
+d3 Right F
+d4 Left G
+d4 Right H
+d5 Left I
+d5 Right J
+d6 Left K
+d6 Right L
+d7 Left M
+d7 Right N
+d8 Left O
+d8 Right P
+dx Down Z
+"""
+
+evaluation: """
+A -3.0
+B 13.0
+C 5.0
+D 9.0
+E 10.0
+F 3.0
+G -6.0
+H 8.0
+Z 3.01
+"""
diff --git a/test_cases/q3/1-6-minmax.solution b/test_cases/q3/1-6-minmax.solution
new file mode 100644
index 0000000..1ced2ca
--- /dev/null
+++ b/test_cases/q3/1-6-minmax.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/1-6-minmax.test.
+action: "Left"
+generated: "A B C D E F G Z a b1 b2 c1 c2 cx d1 d2 d3 d4 dx"
diff --git a/test_cases/q3/1-6-minmax.test b/test_cases/q3/1-6-minmax.test
new file mode 100644
index 0000000..3360e93
--- /dev/null
+++ b/test_cases/q3/1-6-minmax.test
@@ -0,0 +1,75 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "4"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ |
+ c1 c2 cx
+ / \ / \ |
+ d1 d2 d3 d4 dx
+ / \ / \ / \ / \ |
+ A B C D E F G H Z
+-3 13 5 9 10 3 -6 8 2.99
+
+a - max
+b - min
+c - max
+d - min
+
+Note the minimax value of b1 is 3.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "A B C D E F G H I J K L M N O P Z"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Down cx
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+d1 Left A
+d1 Right B
+d2 Left C
+d2 Right D
+d3 Left E
+d3 Right F
+d4 Left G
+d4 Right H
+d5 Left I
+d5 Right J
+d6 Left K
+d6 Right L
+d7 Left M
+d7 Right N
+d8 Left O
+d8 Right P
+dx Down Z
+"""
+
+evaluation: """
+A -3.0
+B 13.0
+C 5.0
+D 9.0
+E 10.0
+F 3.0
+G -6.0
+H 8.0
+Z 2.99
+"""
diff --git a/test_cases/q3/1-7-minmax.solution b/test_cases/q3/1-7-minmax.solution
new file mode 100644
index 0000000..74e2da0
--- /dev/null
+++ b/test_cases/q3/1-7-minmax.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/1-7-minmax.test.
+action: "Left"
+generated: "I J K M O P Z a b1 b2 c3 c4 cx d5 d6 d7 d8 dx"
diff --git a/test_cases/q3/1-7-minmax.test b/test_cases/q3/1-7-minmax.test
new file mode 100644
index 0000000..207381d
--- /dev/null
+++ b/test_cases/q3/1-7-minmax.test
@@ -0,0 +1,75 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "4"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ | / \
+ cx c3 c4
+ | / \ / \
+ dx d5 d6 d7 d8
+ | / \ / \ / \ / \
+ Z I J K L M N O P
+ -1.99 -1 -9 4 7 2 5 -3 -2
+
+a - max
+b - min
+c - min
+d - max
+
+Note that the minimax value of b2 is -2
+"""
+num_agents: "3"
+
+start_state: "a"
+win_states: "A B C D E F G H I J K L M N O P Z"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Down cx
+b2 Left c3
+b2 Right c4
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+d1 Left A
+d1 Right B
+d2 Left C
+d2 Right D
+d3 Left E
+d3 Right F
+d4 Left G
+d4 Right H
+d5 Left I
+d5 Right J
+d6 Left K
+d6 Right L
+d7 Left M
+d7 Right N
+d8 Left O
+d8 Right P
+dx Down Z
+"""
+
+evaluation: """
+I -1.0
+J -9.0
+K 4.0
+L 7.0
+M 2.0
+N 5.0
+O -3.0
+P -2.0
+Z -1.99
+"""
diff --git a/test_cases/q3/1-8-minmax.solution b/test_cases/q3/1-8-minmax.solution
new file mode 100644
index 0000000..bf426b8
--- /dev/null
+++ b/test_cases/q3/1-8-minmax.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/1-8-minmax.test.
+action: "Right"
+generated: "I J K M O P Z a b1 b2 c3 c4 cx d5 d6 d7 d8 dx"
diff --git a/test_cases/q3/1-8-minmax.test b/test_cases/q3/1-8-minmax.test
new file mode 100644
index 0000000..2272b30
--- /dev/null
+++ b/test_cases/q3/1-8-minmax.test
@@ -0,0 +1,75 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "4"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ | / \
+ cx c3 c4
+ | / \ / \
+ dx d5 d6 d7 d8
+ | / \ / \ / \ / \
+ Z I J K L M N O P
+ -2.01 -1 -9 4 7 2 5 -3 -2
+
+a - max
+b - min
+c - min
+d - max
+
+Note that the minimax value of b2 is -2
+"""
+num_agents: "3"
+
+start_state: "a"
+win_states: "A B C D E F G H I J K L M N O P Z"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Down cx
+b2 Left c3
+b2 Right c4
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+d1 Left A
+d1 Right B
+d2 Left C
+d2 Right D
+d3 Left E
+d3 Right F
+d4 Left G
+d4 Right H
+d5 Left I
+d5 Right J
+d6 Left K
+d6 Right L
+d7 Left M
+d7 Right N
+d8 Left O
+d8 Right P
+dx Down Z
+"""
+
+evaluation: """
+I -1.0
+J -9.0
+K 4.0
+L 7.0
+M 2.0
+N 5.0
+O -3.0
+P -2.0
+Z -2.01
+"""
diff --git a/test_cases/q3/2-1a-vary-depth.solution b/test_cases/q3/2-1a-vary-depth.solution
new file mode 100644
index 0000000..59d892c
--- /dev/null
+++ b/test_cases/q3/2-1a-vary-depth.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/2-1a-vary-depth.test.
+action: "Left"
+generated: "a b1 b2 c1 c2 cx"
diff --git a/test_cases/q3/2-1a-vary-depth.test b/test_cases/q3/2-1a-vary-depth.test
new file mode 100644
index 0000000..f8f9431
--- /dev/null
+++ b/test_cases/q3/2-1a-vary-depth.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "1"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ |
+-4 c1 c2 9 cx -4.01
+ / \ / \ |
+ d1 d2 d3 d4 dx
+-3 -9 10 6 -4.01
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b1 is -3, but the depth=1 limited value is -4.
+The values next to c1, c2, and cx are the values of the evaluation function, not
+necessarily the correct minimax backup.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Down cx
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+cx Down dx
+"""
+
+evaluation: """
+c1 -4.0
+c2 9.0
+cx -4.01
+d1 -3.0
+d2 -9.0
+d3 10.0
+d4 6.0
+dx -4.01
+"""
diff --git a/test_cases/q3/2-1b-vary-depth.solution b/test_cases/q3/2-1b-vary-depth.solution
new file mode 100644
index 0000000..a0f232c
--- /dev/null
+++ b/test_cases/q3/2-1b-vary-depth.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/2-1b-vary-depth.test.
+action: "Left"
+generated: "a b1 b2 c1 c2 cx d1 d2 d3 dx"
diff --git a/test_cases/q3/2-1b-vary-depth.test b/test_cases/q3/2-1b-vary-depth.test
new file mode 100644
index 0000000..1b161af
--- /dev/null
+++ b/test_cases/q3/2-1b-vary-depth.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "2"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ |
+-4 c1 c2 9 cx -4.01
+ / \ / \ |
+ d1 d2 d3 d4 dx
+-3 -9 10 6 -4.01
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b1 is -3, but the depth=1 limited value is -4.
+The values next to c1, c2, and cx are the values of the evaluation function, not
+necessarily the correct minimax backup.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Down cx
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+cx Down dx
+"""
+
+evaluation: """
+c1 -4.0
+c2 9.0
+cx -4.01
+d1 -3.0
+d2 -9.0
+d3 10.0
+d4 6.0
+dx -4.01
+"""
diff --git a/test_cases/q3/2-2a-vary-depth.solution b/test_cases/q3/2-2a-vary-depth.solution
new file mode 100644
index 0000000..6e8d7fd
--- /dev/null
+++ b/test_cases/q3/2-2a-vary-depth.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/2-2a-vary-depth.test.
+action: "Right"
+generated: "a b1 b2 c1 c2 cx"
diff --git a/test_cases/q3/2-2a-vary-depth.test b/test_cases/q3/2-2a-vary-depth.test
new file mode 100644
index 0000000..b436d21
--- /dev/null
+++ b/test_cases/q3/2-2a-vary-depth.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "1"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ |
+-4 c1 c2 9 cx -3.99
+ / \ / \ |
+ d1 d2 d3 d4 dx
+-3 -9 10 6 -3.99
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b1 is -3, but the depth=1 limited value is -4.
+The values next to c1, c2, and cx are the values of the evaluation function, not
+necessarily the correct minimax backup.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Down cx
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+cx Down dx
+"""
+
+evaluation: """
+c1 -4.0
+c2 9.0
+cx -3.99
+d1 -3.0
+d2 -9.0
+d3 10.0
+d4 6.0
+dx -3.99
+"""
diff --git a/test_cases/q3/2-2b-vary-depth.solution b/test_cases/q3/2-2b-vary-depth.solution
new file mode 100644
index 0000000..f508aef
--- /dev/null
+++ b/test_cases/q3/2-2b-vary-depth.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/2-2b-vary-depth.test.
+action: "Left"
+generated: "a b1 b2 c1 c2 cx d1 d2 d3 dx"
diff --git a/test_cases/q3/2-2b-vary-depth.test b/test_cases/q3/2-2b-vary-depth.test
new file mode 100644
index 0000000..c0540a3
--- /dev/null
+++ b/test_cases/q3/2-2b-vary-depth.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "2"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ |
+-4 c1 c2 9 cx -3.99
+ / \ / \ |
+ d1 d2 d3 d4 dx
+-3 -9 10 6 -3.99
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b1 is -3, but the depth=1 limited value is -4.
+The values next to c1, c2, and cx are the values of the evaluation function, not
+necessarily the correct minimax backup.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Down cx
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+cx Down dx
+"""
+
+evaluation: """
+c1 -4.0
+c2 9.0
+cx -3.99
+d1 -3.0
+d2 -9.0
+d3 10.0
+d4 6.0
+dx -3.99
+"""
diff --git a/test_cases/q3/2-3a-vary-depth.solution b/test_cases/q3/2-3a-vary-depth.solution
new file mode 100644
index 0000000..d805b84
--- /dev/null
+++ b/test_cases/q3/2-3a-vary-depth.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/2-3a-vary-depth.test.
+action: "Left"
+generated: "a b1 b2 c3 c4 cx"
diff --git a/test_cases/q3/2-3a-vary-depth.test b/test_cases/q3/2-3a-vary-depth.test
new file mode 100644
index 0000000..853a8d3
--- /dev/null
+++ b/test_cases/q3/2-3a-vary-depth.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "1"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ | / \
+ 5.01 cx 8 c3 c4 5
+ | / \ / \
+ dx d5 d6 d7 d8
+ 5.01 4 -7 0 5
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b2 is 4, but the depth=1 limited value is 5.
+The values next to c3, c4, and cx are the values of the evaluation function, not
+necessarily the correct minimax backup.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Down cx
+b2 Left c3
+b2 Right c4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+"""
+
+evaluation: """
+c3 8.0
+c4 5.0
+cx 5.01
+d5 4.0
+d6 -7.0
+d7 0.0
+d8 5.0
+dx 5.01
+"""
diff --git a/test_cases/q3/2-3b-vary-depth.solution b/test_cases/q3/2-3b-vary-depth.solution
new file mode 100644
index 0000000..32a70c6
--- /dev/null
+++ b/test_cases/q3/2-3b-vary-depth.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/2-3b-vary-depth.test.
+action: "Left"
+generated: "a b1 b2 c3 cx d5 d6 dx"
diff --git a/test_cases/q3/2-3b-vary-depth.test b/test_cases/q3/2-3b-vary-depth.test
new file mode 100644
index 0000000..2892005
--- /dev/null
+++ b/test_cases/q3/2-3b-vary-depth.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "2"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ | / \
+ 5.01 cx 8 c3 c4 5
+ | / \ / \
+ dx d5 d6 d7 d8
+ 5.01 4 -7 0 5
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b2 is 4, but the depth=1 limited value is 5.
+The values next to c3, c4, and cx are the values of the evaluation function, not
+necessarily the correct minimax backup.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Down cx
+b2 Left c3
+b2 Right c4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+"""
+
+evaluation: """
+c3 8.0
+c4 5.0
+cx 5.01
+d5 4.0
+d6 -7.0
+d7 0.0
+d8 5.0
+dx 5.01
+"""
diff --git a/test_cases/q3/2-4a-vary-depth.solution b/test_cases/q3/2-4a-vary-depth.solution
new file mode 100644
index 0000000..eafdb62
--- /dev/null
+++ b/test_cases/q3/2-4a-vary-depth.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/2-4a-vary-depth.test.
+action: "Right"
+generated: "a b1 b2 c3 c4 cx"
diff --git a/test_cases/q3/2-4a-vary-depth.test b/test_cases/q3/2-4a-vary-depth.test
new file mode 100644
index 0000000..73d681b
--- /dev/null
+++ b/test_cases/q3/2-4a-vary-depth.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "1"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ | / \
+ 4.99 cx 8 c3 c4 5
+ | / \ / \
+ dx d5 d6 d7 d8
+ 4.99 4 -7 0 5
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b2 is 4, but the depth=1 limited value is 5.
+The values next to c3, c4, and cx are the values of the evaluation function, not
+necessarily the correct minimax backup.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Down cx
+b2 Left c3
+b2 Right c4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+"""
+
+evaluation: """
+c3 8.0
+c4 5.0
+cx 4.99
+d5 4.0
+d6 -7.0
+d7 0.0
+d8 5.0
+dx 4.99
+"""
diff --git a/test_cases/q3/2-4b-vary-depth.solution b/test_cases/q3/2-4b-vary-depth.solution
new file mode 100644
index 0000000..820398a
--- /dev/null
+++ b/test_cases/q3/2-4b-vary-depth.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/2-4b-vary-depth.test.
+action: "Left"
+generated: "a b1 b2 c3 cx d5 d6 dx"
diff --git a/test_cases/q3/2-4b-vary-depth.test b/test_cases/q3/2-4b-vary-depth.test
new file mode 100644
index 0000000..a7aca5f
--- /dev/null
+++ b/test_cases/q3/2-4b-vary-depth.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "2"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ | / \
+ 4.99 cx 8 c3 c4 5
+ | / \ / \
+ dx d5 d6 d7 d8
+ 4.99 4 -7 0 5
+
+a - max
+b - min
+c - max
+
+Note that the minimax value of b2 is 4, but the depth=1 limited value is 5.
+The values next to c3, c4, and cx are the values of the evaluation function, not
+necessarily the correct minimax backup.
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8 dx"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Down cx
+b2 Left c3
+b2 Right c4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+cx Down dx
+"""
+
+evaluation: """
+c3 8.0
+c4 5.0
+cx 4.99
+d5 4.0
+d6 -7.0
+d7 0.0
+d8 5.0
+dx 4.99
+"""
diff --git a/test_cases/q3/2-one-ghost-3level.solution b/test_cases/q3/2-one-ghost-3level.solution
new file mode 100644
index 0000000..b22f76f
--- /dev/null
+++ b/test_cases/q3/2-one-ghost-3level.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/2-one-ghost-3level.test.
+action: "Left"
+generated: "a b1 b2 c1 c2 c3 d1 d2 d3 d5 d6"
diff --git a/test_cases/q3/2-one-ghost-3level.test b/test_cases/q3/2-one-ghost-3level.test
new file mode 100644
index 0000000..2ec19b4
--- /dev/null
+++ b/test_cases/q3/2-one-ghost-3level.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "3"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ / \
+ c1 c2 c3 c4
+ / \ / \ / \ / \
+ d1 d2 d3 d4 d5 d6 d7 d8
+ 3 9 10 6 4 7 0 5
+
+a - max
+b - min
+c - max
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Left c3
+b2 Right c4
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+"""
+
+evaluation: """
+d1 3.0
+d2 9.0
+d3 10.0
+d4 6.0
+d5 4.0
+d6 7.0
+d7 0.0
+d8 5.0
+"""
diff --git a/test_cases/q3/3-one-ghost-4level.solution b/test_cases/q3/3-one-ghost-4level.solution
new file mode 100644
index 0000000..b6a9a0f
--- /dev/null
+++ b/test_cases/q3/3-one-ghost-4level.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/3-one-ghost-4level.test.
+action: "Left"
+generated: "A B C D E F I K a b1 b2 c1 c2 c3 d1 d2 d3 d5 d6"
diff --git a/test_cases/q3/3-one-ghost-4level.test b/test_cases/q3/3-one-ghost-4level.test
new file mode 100644
index 0000000..cfa33db
--- /dev/null
+++ b/test_cases/q3/3-one-ghost-4level.test
@@ -0,0 +1,79 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "4"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ / \
+ c1 c2 c3 c4
+ / \ / \ / \ / \
+ d1 d2 d3 d4 d5 d6 d7 d8
+/ \ / \ / \ / \ / \ / \ / \ / \
+A B C D E F G H I J K L M N O P
+3 13 5 9 10 11 6 8 1 0 4 7 12 15 2 14
+
+a - max
+b - min
+c - max
+d - min
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "A B C D E F G H I J K L M N O P"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Left c3
+b2 Right c4
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+d1 Left A
+d1 Right B
+d2 Left C
+d2 Right D
+d3 Left E
+d3 Right F
+d4 Left G
+d4 Right H
+d5 Left I
+d5 Right J
+d6 Left K
+d6 Right L
+d7 Left M
+d7 Right N
+d8 Left O
+d8 Right P
+"""
+
+evaluation: """
+A 3.0
+B 13.0
+C 5.0
+D 9.0
+E 10.0
+F 11.0
+G 6.0
+H 8.0
+I 1.0
+J 0.0
+K 4.0
+L 7.0
+M 12.0
+N 15.0
+O 2.0
+P 14.0
+"""
diff --git a/test_cases/q3/4-two-ghosts-3level.solution b/test_cases/q3/4-two-ghosts-3level.solution
new file mode 100644
index 0000000..12e9520
--- /dev/null
+++ b/test_cases/q3/4-two-ghosts-3level.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/4-two-ghosts-3level.test.
+action: "Left"
+generated: "a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7"
diff --git a/test_cases/q3/4-two-ghosts-3level.test b/test_cases/q3/4-two-ghosts-3level.test
new file mode 100644
index 0000000..fd3ac91
--- /dev/null
+++ b/test_cases/q3/4-two-ghosts-3level.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "3"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ / \
+ c1 c2 c3 c4
+ / \ / \ / \ / \
+ d1 d2 d3 d4 d5 d6 d7 d8
+ 3 9 10 6 4 7 0 5
+
+a - max
+b - min
+c - min
+"""
+num_agents: "3"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Left c3
+b2 Right c4
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+"""
+
+evaluation: """
+d1 3.0
+d2 9.0
+d3 10.0
+d4 6.0
+d5 4.0
+d6 7.0
+d7 0.0
+d8 5.0
+"""
diff --git a/test_cases/q3/5-two-ghosts-4level.solution b/test_cases/q3/5-two-ghosts-4level.solution
new file mode 100644
index 0000000..89c534c
--- /dev/null
+++ b/test_cases/q3/5-two-ghosts-4level.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/5-two-ghosts-4level.test.
+action: "Left"
+generated: "A B C D E G H I J a b1 b2 c1 c2 c3 d1 d2 d3 d4 d5"
diff --git a/test_cases/q3/5-two-ghosts-4level.test b/test_cases/q3/5-two-ghosts-4level.test
new file mode 100644
index 0000000..ce431fc
--- /dev/null
+++ b/test_cases/q3/5-two-ghosts-4level.test
@@ -0,0 +1,79 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "4"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ / \
+ c1 c2 c3 c4
+ / \ / \ / \ / \
+ d1 d2 d3 d4 d5 d6 d7 d8
+/ \ / \ / \ / \ / \ / \ / \ / \
+A B C D E F G H I J K L M N O P
+3 13 5 9 10 11 6 8 1 0 4 7 12 15 2 14
+
+a - max
+b - min
+c - min
+d - max
+"""
+num_agents: "3"
+
+start_state: "a"
+win_states: "A B C D E F G H I J K L M N O P"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Left c3
+b2 Right c4
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+d1 Left A
+d1 Right B
+d2 Left C
+d2 Right D
+d3 Left E
+d3 Right F
+d4 Left G
+d4 Right H
+d5 Left I
+d5 Right J
+d6 Left K
+d6 Right L
+d7 Left M
+d7 Right N
+d8 Left O
+d8 Right P
+"""
+
+evaluation: """
+A 3.0
+B 13.0
+C 5.0
+D 9.0
+E 10.0
+F 11.0
+G 6.0
+H 8.0
+I 1.0
+J 0.0
+K 4.0
+L 7.0
+M 12.0
+N 15.0
+O 2.0
+P 14.0
+"""
diff --git a/test_cases/q3/6-tied-root.solution b/test_cases/q3/6-tied-root.solution
new file mode 100644
index 0000000..93ece27
--- /dev/null
+++ b/test_cases/q3/6-tied-root.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/6-tied-root.test.
+action: "Left"
+generated: "A B C max min1 min2"
diff --git a/test_cases/q3/6-tied-root.test b/test_cases/q3/6-tied-root.test
new file mode 100644
index 0000000..b43f4d9
--- /dev/null
+++ b/test_cases/q3/6-tied-root.test
@@ -0,0 +1,31 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "3"
+
+diagram: """
+ max
+ / \
+min1 min2
+ | / \
+ A B C
+10 10 0
+"""
+num_agents: "2"
+
+start_state: "max"
+win_states: "A B"
+lose_states: "C"
+
+successors: """
+max Left min1
+max Right min2
+min1 Down A
+min2 Left B
+min2 Right C
+"""
+
+evaluation: """
+A 10.0
+B 10.0
+C 0.0
+"""
diff --git a/test_cases/q3/7-1a-check-depth-one-ghost.solution b/test_cases/q3/7-1a-check-depth-one-ghost.solution
new file mode 100644
index 0000000..a3a74c4
--- /dev/null
+++ b/test_cases/q3/7-1a-check-depth-one-ghost.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/7-1a-check-depth-one-ghost.test.
+action: "Left"
+generated: "a b1 b2 b3 c1 c2 c3"
diff --git a/test_cases/q3/7-1a-check-depth-one-ghost.test b/test_cases/q3/7-1a-check-depth-one-ghost.test
new file mode 100644
index 0000000..0230cbb
--- /dev/null
+++ b/test_cases/q3/7-1a-check-depth-one-ghost.test
@@ -0,0 +1,83 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "1"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 10 c1 0 c2 c3 8
+ | | |
+ 0 d1 0 d2 d3 8
+ | | |
+ 0 e1 10 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ g1 g2 g3
+ 0 0 8
+
+a - max
+b - min
+c - max
+d - min
+e - max
+f - min
+
+At depth 1, the evaluation function is called at level c,
+so Left should be returned. If your algorithm is returning a
+different action, check how you implemented your depth.
+"""
+
+num_agents: "2"
+
+start_state: "a"
+win_states: "g1 g2 g3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 10.0
+c2 0.0
+c3 8.0
+d1 0.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 10.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 0.0
+g3 8.0
+"""
+
+
diff --git a/test_cases/q3/7-1b-check-depth-one-ghost.solution b/test_cases/q3/7-1b-check-depth-one-ghost.solution
new file mode 100644
index 0000000..8dad3c7
--- /dev/null
+++ b/test_cases/q3/7-1b-check-depth-one-ghost.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/7-1b-check-depth-one-ghost.test.
+action: "Center"
+generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3"
diff --git a/test_cases/q3/7-1b-check-depth-one-ghost.test b/test_cases/q3/7-1b-check-depth-one-ghost.test
new file mode 100644
index 0000000..7e0ced7
--- /dev/null
+++ b/test_cases/q3/7-1b-check-depth-one-ghost.test
@@ -0,0 +1,83 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "2"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 10 c1 0 c2 c3 8
+ | | |
+ 0 d1 0 d2 d3 8
+ | | |
+ 0 e1 10 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ g1 g2 g3
+ 0 0 8
+
+a - max
+b - min
+c - max
+d - min
+e - max
+f - min
+
+At depth 2, the evaluation function is called at level e,
+so Center should be returned. If your algorithm is returning a
+different action, check how you implemented your depth.
+"""
+
+num_agents: "2"
+
+start_state: "a"
+win_states: "g1 g2 g3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 10.0
+c2 0.0
+c3 8.0
+d1 0.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 10.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 0.0
+g3 8.0
+"""
+
+
diff --git a/test_cases/q3/7-1c-check-depth-one-ghost.solution b/test_cases/q3/7-1c-check-depth-one-ghost.solution
new file mode 100644
index 0000000..210bb5d
--- /dev/null
+++ b/test_cases/q3/7-1c-check-depth-one-ghost.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/7-1c-check-depth-one-ghost.test.
+action: "Right"
+generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3"
diff --git a/test_cases/q3/7-1c-check-depth-one-ghost.test b/test_cases/q3/7-1c-check-depth-one-ghost.test
new file mode 100644
index 0000000..cd92eb9
--- /dev/null
+++ b/test_cases/q3/7-1c-check-depth-one-ghost.test
@@ -0,0 +1,83 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "3"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 10 c1 0 c2 c3 8
+ | | |
+ 0 d1 0 d2 d3 8
+ | | |
+ 0 e1 10 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ g1 g2 g3
+ 0 0 8
+
+a - max
+b - min
+c - max
+d - min
+e - max
+f - min
+
+At depth 3, the evaluation function is called at level g,
+so Right should be returned. If your algorithm is returning a
+different action, check how you implemented your depth.
+"""
+
+num_agents: "2"
+
+start_state: "a"
+win_states: "g1 g2 g3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 10.0
+c2 0.0
+c3 8.0
+d1 0.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 10.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 0.0
+g3 8.0
+"""
+
+
diff --git a/test_cases/q3/7-2a-check-depth-two-ghosts.solution b/test_cases/q3/7-2a-check-depth-two-ghosts.solution
new file mode 100644
index 0000000..9a08a38
--- /dev/null
+++ b/test_cases/q3/7-2a-check-depth-two-ghosts.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/7-2a-check-depth-two-ghosts.test.
+action: "Left"
+generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3"
diff --git a/test_cases/q3/7-2a-check-depth-two-ghosts.test b/test_cases/q3/7-2a-check-depth-two-ghosts.test
new file mode 100644
index 0000000..421a05c
--- /dev/null
+++ b/test_cases/q3/7-2a-check-depth-two-ghosts.test
@@ -0,0 +1,110 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "1"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 0 c1 0 c2 c3 8
+ | | |
+ 10 d1 0 d2 d3 8
+ | | |
+ 0 e1 0 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ 0 g1 10 g2 g3 8
+ | | |
+ 0 h1 0 h2 h3 8
+ | | |
+ 0 i1 0 i2 i3 8
+ | | |
+ j1 j2 j3
+ 0 0 8
+
+a - max
+b - min
+c - min
+d - max
+e - min
+f - min
+g - max
+h - min
+i - min
+
+At depth 1, the evaluation function is called at level d,
+so Left should be returned. If your algorithm is returning a
+different action, check how you implemented your depth.
+"""
+
+num_agents: "3"
+
+start_state: "a"
+win_states: "j1 j2 j3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+g1 Center h1
+g2 Center h2
+g3 Center h3
+h1 Center i1
+h2 Center i2
+h3 Center i3
+i1 Center j1
+i2 Center j2
+i3 Center j3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 0.0
+c2 0.0
+c3 8.0
+d1 10.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 0.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 10.0
+g3 8.0
+h1 0.0
+h2 0.0
+h3 8.0
+i1 0.0
+i2 0.0
+i3 8.0
+j1 0.0
+j2 0.0
+j3 8.0
+"""
+
+
diff --git a/test_cases/q3/7-2b-check-depth-two-ghosts.solution b/test_cases/q3/7-2b-check-depth-two-ghosts.solution
new file mode 100644
index 0000000..194c0aa
--- /dev/null
+++ b/test_cases/q3/7-2b-check-depth-two-ghosts.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/7-2b-check-depth-two-ghosts.test.
+action: "Center"
+generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3"
diff --git a/test_cases/q3/7-2b-check-depth-two-ghosts.test b/test_cases/q3/7-2b-check-depth-two-ghosts.test
new file mode 100644
index 0000000..728806e
--- /dev/null
+++ b/test_cases/q3/7-2b-check-depth-two-ghosts.test
@@ -0,0 +1,110 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "2"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 0 c1 0 c2 c3 8
+ | | |
+ 10 d1 0 d2 d3 8
+ | | |
+ 0 e1 0 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ 0 g1 10 g2 g3 8
+ | | |
+ 0 h1 0 h2 h3 8
+ | | |
+ 0 i1 0 i2 i3 8
+ | | |
+ j1 j2 j3
+ 0 0 8
+
+a - max
+b - min
+c - min
+d - max
+e - min
+f - min
+g - max
+h - min
+i - min
+
+At depth 2, the evaluation function is called at level g,
+so Center should be returned. If your algorithm is returning
+a different action, check how you implemented your depth.
+"""
+
+num_agents: "3"
+
+start_state: "a"
+win_states: "j1 j2 j3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+g1 Center h1
+g2 Center h2
+g3 Center h3
+h1 Center i1
+h2 Center i2
+h3 Center i3
+i1 Center j1
+i2 Center j2
+i3 Center j3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 0.0
+c2 0.0
+c3 8.0
+d1 10.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 0.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 10.0
+g3 8.0
+h1 0.0
+h2 0.0
+h3 8.0
+i1 0.0
+i2 0.0
+i3 8.0
+j1 0.0
+j2 0.0
+j3 8.0
+"""
+
+
diff --git a/test_cases/q3/7-2c-check-depth-two-ghosts.solution b/test_cases/q3/7-2c-check-depth-two-ghosts.solution
new file mode 100644
index 0000000..1e989f3
--- /dev/null
+++ b/test_cases/q3/7-2c-check-depth-two-ghosts.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q3/7-2c-check-depth-two-ghosts.test.
+action: "Right"
+generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3 h1 h2 h3 i1 i2 i3 j1 j2 j3"
diff --git a/test_cases/q3/7-2c-check-depth-two-ghosts.test b/test_cases/q3/7-2c-check-depth-two-ghosts.test
new file mode 100644
index 0000000..9e727ce
--- /dev/null
+++ b/test_cases/q3/7-2c-check-depth-two-ghosts.test
@@ -0,0 +1,110 @@
+class: "GraphGameTreeTest"
+alg: "AlphaBetaAgent"
+depth: "3"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 0 c1 0 c2 c3 8
+ | | |
+ 10 d1 0 d2 d3 8
+ | | |
+ 0 e1 0 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ 0 g1 10 g2 g3 8
+ | | |
+ 0 h1 0 h2 h3 8
+ | | |
+ 0 i1 0 i2 i3 8
+ | | |
+ j1 j2 j3
+ 0 0 8
+
+a - max
+b - min
+c - min
+d - max
+e - min
+f - min
+g - max
+h - min
+i - min
+
+At depth 3, the evaluation function is called at level j,
+so Right should be returned. If your algorithm is returning
+a different action, check how you implemented your depth.
+"""
+
+num_agents: "3"
+
+start_state: "a"
+win_states: "j1 j2 j3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+g1 Center h1
+g2 Center h2
+g3 Center h3
+h1 Center i1
+h2 Center i2
+h3 Center i3
+i1 Center j1
+i2 Center j2
+i3 Center j3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 0.0
+c2 0.0
+c3 8.0
+d1 10.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 0.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 10.0
+g3 8.0
+h1 0.0
+h2 0.0
+h3 8.0
+i1 0.0
+i2 0.0
+i3 8.0
+j1 0.0
+j2 0.0
+j3 8.0
+"""
+
+
diff --git a/test_cases/q3/8-pacman-game.solution b/test_cases/q3/8-pacman-game.solution
new file mode 100644
index 0000000..ec0b0b8
--- /dev/null
+++ b/test_cases/q3/8-pacman-game.solution
@@ -0,0 +1,444 @@
+optimalActions: """
+[[["West", "East"], 39], [["West", "East"], 27]]
+[[["West"], 75], [["West"], 57]]
+[[["West"], 95], [["West"], 77]]
+[[["West", "North"], 72], [["West", "North"], 54]]
+[[["West"], 51], [["West"], 39]]
+[[["West", "North"], 76], [["West", "North"], 58]]
+[[["West"], 69], [["West"], 51]]
+[[["West"], 34], [["West"], 23]]
+[[["North"], 55], [["North"], 41]]
+[[["North"], 63], [["North"], 32]]
+[[["North"], 87], [["North"], 43]]
+[[["North"], 69], [["North"], 34]]
+[[["Stop", "East"], 116], [["East"], 56]]
+[[["East"], 110], [["East"], 52]]
+[[["West"], 28], [["West"], 17]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 18], [["East"], 12]]
+[[["North"], 28], [["North"], 18]]
+[[["North"], 47], [["North"], 30]]
+[[["West"], 35], [["West"], 23]]
+[[["East"], 28], [["East"], 15]]
+[[["North"], 74], [["North"], 48]]
+[[["East", "North"], 130], [["East", "North"], 87]]
+[[["East", "North"], 213], [["East", "North"], 158]]
+[[["North", "South"], 101], [["North", "South"], 73]]
+[[["West"], 48], [["West"], 36]]
+[[["West"], 60], [["West"], 45]]
+[[["West", "Stop"], 49], [["West"], 36]]
+[[["West", "Stop", "East"], 69], [["West", "East"], 49]]
+[[["West", "Stop", "East"], 61], [["West", "East"], 41]]
+[[["Stop", "East", "South"], 55], [["East", "South"], 37]]
+[[["Stop", "East", "South"], 28], [["East", "South"], 19]]
+[[["Stop", "East", "South"], 34], [["East", "South"], 23]]
+[[["Stop", "East", "South"], 55], [["East", "South"], 37]]
+[[["Stop", "East", "South"], 55], [["East", "South"], 37]]
+[[["Stop", "East", "South"], 61], [["East", "South"], 41]]
+[[["Stop", "East", "South"], 81], [["East", "South"], 53]]
+[[["Stop", "East", "South"], 64], [["East", "South"], 43]]
+[[["Stop", "East", "South"], 61], [["East", "South"], 41]]
+[[["Stop", "East", "South"], 61], [["East", "South"], 41]]
+[[["Stop", "East", "South"], 81], [["East", "South"], 53]]
+[[["Stop", "East", "South"], 102], [["East", "South"], 67]]
+[[["Stop", "South"], 23], [["South"], 13]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 18], [["East"], 12]]
+[[["East", "North"], 28], [["East", "North"], 18]]
+[[["East"], 35], [["East"], 22]]
+[[["North"], 28], [["North"], 18]]
+[[["North"], 38], [["North"], 22]]
+[[["East"], 30], [["East"], 20]]
+[[["East"], 33], [["East"], 18]]
+[[["East"], 18], [["East"], 12]]
+[[["East"], 33], [["East"], 23]]
+[[["East"], 48], [["East"], 29]]
+[[["East"], 49], [["East"], 26]]
+[[["East"], 44], [["East"], 29]]
+[[["North", "South"], 75], [["North", "South"], 52]]
+[[["East", "North"], 113], [["East", "North"], 74]]
+[[["East", "North"], 89], [["East", "North"], 65]]
+[[["North", "South"], 173], [["North", "South"], 130]]
+[[["West", "East"], 54], [["West", "East"], 42]]
+[[["West", "Stop", "East"], 130], [["West", "East"], 87]]
+[[["Stop", "East", "South"], 58], [["East", "South"], 43]]
+[[["Stop", "East"], 110], [["South"], 82]]
+[[["East"], 71], [["East"], 45]]
+[[["West"], 66], [["West"], 42]]
+[[["South"], 70], [["South"], 48]]
+[[["South"], 48], [["South"], 37]]
+[[["South"], 179], [["South"], 104]]
+[[["South"], 68], [["South"], 41]]
+[[["West"], 25], [["West"], 18]]
+[[["West"], 41], [["West"], 29]]
+[[["West"], 14], [["West"], 10]]
+[[["West"], 20], [["West"], 14]]
+[[["West"], 13], [["West"], 9]]
+[[["West"], 13], [["West"], 9]]
+[[["West"], 16], [["West"], 12]]
+[[["West", "North"], 28], [["West", "North"], 20]]
+[[["West"], 30], [["West"], 23]]
+[[["West", "Stop", "East", "North"], 66], [["West", "East", "North"], 46]]
+[[["West", "Stop", "East"], 126], [["West", "East"], 89]]
+[[["West", "Stop", "East"], 30], [["West", "East"], 20]]
+[[["Stop", "East", "North"], 67], [["East", "North"], 45]]
+[[["Stop", "North"], 52], [["North"], 27]]
+[[["North"], 30], [["North"], 19]]
+[[["North"], 28], [["North"], 17]]
+[[["North"], 17], [["North"], 11]]
+[[["North"], 30], [["North"], 19]]
+[[["East"], 28], [["East"], 17]]
+[[["East"], 17], [["East"], 11]]
+[[["East"], 40], [["East"], 29]]
+[[["East", "South"], 79], [["East", "South"], 60]]
+[[["East", "South"], 90], [["East", "South"], 62]]
+[[["South"], 96], [["South"], 61]]
+[[["North", "South"], 195], [["North", "South"], 132]]
+[[["West"], 80], [["West"], 60]]
+[[["West", "Stop", "East", "South"], 83], [["West", "East", "South"], 61]]
+[[["West", "Stop", "East", "South"], 90], [["West", "East", "South"], 66]]
+[[["West", "Stop", "East"], 134], [["West", "East"], 95]]
+[[["West", "Stop", "East"], 82], [["West", "East"], 55]]
+[[["Stop", "East", "South"], 142], [["East", "South"], 95]]
+[[["Stop", "East", "South"], 95], [["East", "South"], 62]]
+[[["Stop", "East", "South"], 128], [["East", "South"], 86]]
+[[["Stop", "East", "South"], 82], [["East", "South"], 55]]
+[[["Stop", "East", "South"], 85], [["East", "South"], 57]]
+[[["Stop", "East", "South"], 182], [["East", "South"], 119]]
+[[["Stop", "East", "South"], 154], [["East", "South"], 103]]
+[[["Stop", "South"], 46], [["South"], 25]]
+[[["South"], 28], [["South"], 17]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 18], [["East"], 12]]
+[[["East", "North"], 28], [["East", "North"], 18]]
+[[["East"], 37], [["East"], 22]]
+[[["East", "North"], 34], [["East", "North"], 24]]
+[[["East"], 56], [["East"], 29]]
+[[["East"], 17], [["East"], 11]]
+[[["East"], 26], [["East"], 15]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 18], [["East"], 12]]
+[[["East"], 27], [["East"], 17]]
+[[["East"], 37], [["East"], 22]]
+[[["East", "North"], 37], [["East", "North"], 24]]
+[[["East"], 56], [["East"], 29]]
+[[["East"], 19], [["East"], 11]]
+[[["North"], 26], [["North"], 15]]
+[[["North"], 19], [["North"], 11]]
+[[["North"], 30], [["North"], 17]]
+[[["North"], 30], [["North"], 19]]
+[[["West"], 28], [["West"], 19]]
+[[["West"], 23], [["West"], 13]]
+[[["West", "Stop", "East"], 7], [["West", "East"], 3]]
+"""
+altDepthActions: """
+[["West", "East"], ["West", "East"], ["West", "East"], ["West", "East"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["Stop", "North"], ["North"]]
+[["East"], ["East"], ["Stop", "East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["West"], ["West"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["North", "South"], ["North", "South"], ["North"], ["North"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West", "Stop"], ["West"]]
+[["West"], ["West"], ["West", "Stop", "East", "South"], ["West", "East", "South"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "South"], ["South"], ["Stop", "South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East", "North"], ["East", "North"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["North", "South"], ["North", "South"], ["South"], ["South"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["North", "South"], ["North", "South"], ["North"], ["North"]]
+[["West", "East"], ["West", "East"], ["East"], ["East"]]
+[["West"], ["West"], ["East"], ["East"]]
+[["Stop", "East", "South"], ["East", "South"], ["East"], ["East"]]
+[["Stop", "East"], ["East"], ["Stop", "East"], ["South"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["West", "East"], ["West", "East"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "Stop", "East", "North"], ["West", "East", "North"], ["West", "Stop", "East", "North"], ["West", "East", "North"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "North"], ["East", "North"], ["Stop", "East", "North"], ["East", "North"]]
+[["Stop", "North"], ["North"], ["Stop", "North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East", "South"], ["East", "South"], ["East", "South"], ["East", "South"]]
+[["East", "South"], ["East", "South"], ["East", "South"], ["East", "South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["North", "South"], ["North", "South"], ["North", "South"], ["North", "South"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "Stop", "East", "South"], ["West", "East", "South"], ["West", "Stop", "East", "South"], ["West", "East", "South"]]
+[["West", "Stop", "East", "South"], ["West", "East", "South"], ["West", "Stop", "East", "South"], ["West", "East", "South"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "South"], ["South"], ["Stop", "South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East", "North"], ["East", "North"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+"""
+partialPlyBugActions: """
+[["West", "East"], ["West", "East"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West", "North"], ["West", "North"]]
+[["West"], ["West"]]
+[["West", "North"], ["West", "North"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["East"], ["East"]]
+[["Stop", "East"], ["East"]]
+[["West"], ["West"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["West"], ["West"]]
+[["East"], ["East"]]
+[["North"], ["North"]]
+[["East", "North"], ["East", "North"]]
+[["East", "North"], ["East", "North"]]
+[["North", "South"], ["North", "South"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West", "Stop"], ["West"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East", "North"], ["East", "North"]]
+[["East"], ["East"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["North", "South"], ["North", "South"]]
+[["East", "North"], ["East", "North"]]
+[["East", "North"], ["East", "North"]]
+[["North", "South"], ["North", "South"]]
+[["West", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East"], ["East"]]
+[["East"], ["East"]]
+[["West"], ["West"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West", "North"], ["West", "North"]]
+[["West"], ["West"]]
+[["West", "Stop", "East", "North"], ["West", "East", "North"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "North"], ["East", "North"]]
+[["Stop", "North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East", "South"], ["East", "South"]]
+[["East", "South"], ["East", "South"]]
+[["South"], ["South"]]
+[["North", "South"], ["North", "South"]]
+[["West"], ["West"]]
+[["West", "Stop", "East", "South"], ["West", "East", "South"]]
+[["West", "Stop", "East", "South"], ["West", "East", "South"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East", "North"], ["East", "North"]]
+[["East"], ["East"]]
+[["East", "North"], ["East", "North"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East", "North"], ["East", "North"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West", "Stop", "East"], ["West", "East"]]
+"""
diff --git a/test_cases/q3/8-pacman-game.test b/test_cases/q3/8-pacman-game.test
new file mode 100644
index 0000000..56341ae
--- /dev/null
+++ b/test_cases/q3/8-pacman-game.test
@@ -0,0 +1,19 @@
+class: "PacmanGameTreeTest"
+alg: "AlphaBetaAgent"
+seed: "0"
+depth: "2"
+max_points: "4"
+
+# The following specifies the layout to be used
+layoutName: "smallClassic"
+layout: """
+%%%%%%%%%%%%%%%%%%%%
+%......%G G%......%
+%.%%...%% %%...%%.%
+%.%o.%........%.o%.%
+%.%%.%.%%%%%%.%.%%.%
+%........P.........%
+%%%%%%%%%%%%%%%%%%%%
+"""
+
+
diff --git a/test_cases/q3/CONFIG b/test_cases/q3/CONFIG
new file mode 100644
index 0000000..a5adc3f
--- /dev/null
+++ b/test_cases/q3/CONFIG
@@ -0,0 +1,2 @@
+max_points: "5"
+class: "PassAllTestsQuestion"
diff --git a/test_cases/q4/0-expectimax1.solution b/test_cases/q4/0-expectimax1.solution
new file mode 100644
index 0000000..ff7dfe6
--- /dev/null
+++ b/test_cases/q4/0-expectimax1.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q4/0-expectimax1.test.
+action: "Left"
+generated: "A B C D E F G H I exp1 exp2 exp3 max"
diff --git a/test_cases/q4/0-expectimax1.test b/test_cases/q4/0-expectimax1.test
new file mode 100644
index 0000000..deee59a
--- /dev/null
+++ b/test_cases/q4/0-expectimax1.test
@@ -0,0 +1,48 @@
+class: "GraphGameTreeTest"
+alg: "ExpectimaxAgent"
+depth: "2"
+
+# Tree adapted from lecture 6 slides
+diagram: """
+ max
+ /-/ | \--\
+ / | \
+ / | \
+ exp1 exp2 exp3
+ /|\ /|\ /|\
+ / | \ / | \ / | \
+A B C D E F G H I
+3 12 8 2 4 6 14 5 2
+"""
+num_agents: "2"
+
+start_state: "max"
+win_states: "A B C D E F G H I"
+lose_states: ""
+
+successors: """
+max Left exp1
+max Center exp2
+max Right exp3
+exp1 Left A
+exp1 Center B
+exp1 Right C
+exp2 Left D
+exp2 Center E
+exp2 Right F
+exp3 Left G
+exp3 Center H
+exp3 Right I
+"""
+
+evaluation: """
+A 3.0
+B 12.0
+C 8.0
+D 2.0
+E 4.0
+F 6.0
+G 14.0
+H 5.0
+I 2.0
+"""
diff --git a/test_cases/q4/1-expectimax2.solution b/test_cases/q4/1-expectimax2.solution
new file mode 100644
index 0000000..30c8768
--- /dev/null
+++ b/test_cases/q4/1-expectimax2.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q4/1-expectimax2.test.
+action: "Left"
+generated: "A B C D E F G H I exp1 exp2 exp3 max"
diff --git a/test_cases/q4/1-expectimax2.test b/test_cases/q4/1-expectimax2.test
new file mode 100644
index 0000000..d220fe6
--- /dev/null
+++ b/test_cases/q4/1-expectimax2.test
@@ -0,0 +1,48 @@
+class: "GraphGameTreeTest"
+alg: "ExpectimaxAgent"
+depth: "2"
+
+# Tree from lecture 7 slides
+diagram: """
+ max
+ /-/ | \--\
+ / | \
+ / | \
+ exp1 exp2 exp3
+ /|\ /|\ /|\
+ / | \ / | \ / | \
+A B C D E F G H I
+3 12 9 2 4 6 15 6 0
+"""
+num_agents: "2"
+
+start_state: "max"
+win_states: "A B C D E F G H I"
+lose_states: ""
+
+successors: """
+max Left exp1
+max Center exp2
+max Right exp3
+exp1 Left A
+exp1 Center B
+exp1 Right C
+exp2 Left D
+exp2 Center E
+exp2 Right F
+exp3 Left G
+exp3 Center H
+exp3 Right I
+"""
+
+evaluation: """
+A 3.0
+B 12.0
+C 9.0
+D 2.0
+E 4.0
+F 6.0
+G 15.0
+H 6.0
+I 0.0
+"""
diff --git a/test_cases/q4/2-one-ghost-3level.solution b/test_cases/q4/2-one-ghost-3level.solution
new file mode 100644
index 0000000..bde5c83
--- /dev/null
+++ b/test_cases/q4/2-one-ghost-3level.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q4/2-one-ghost-3level.test.
+action: "Left"
+generated: "a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7 d8"
diff --git a/test_cases/q4/2-one-ghost-3level.test b/test_cases/q4/2-one-ghost-3level.test
new file mode 100644
index 0000000..5778a1d
--- /dev/null
+++ b/test_cases/q4/2-one-ghost-3level.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "ExpectimaxAgent"
+depth: "3"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ / \
+ c1 c2 c3 c4
+ / \ / \ / \ / \
+ d1 d2 d3 d4 d5 d6 d7 d8
+ 3 9 10 6 4 7 0 5
+
+a - max
+b - exp
+c - max
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Left c3
+b2 Right c4
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+"""
+
+evaluation: """
+d1 3.0
+d2 9.0
+d3 10.0
+d4 6.0
+d5 4.0
+d6 7.0
+d7 0.0
+d8 5.0
+"""
diff --git a/test_cases/q4/3-one-ghost-4level.solution b/test_cases/q4/3-one-ghost-4level.solution
new file mode 100644
index 0000000..4ba2b47
--- /dev/null
+++ b/test_cases/q4/3-one-ghost-4level.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q4/3-one-ghost-4level.test.
+action: "Right"
+generated: "A B C D E F G H I J K L M N O P a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7 d8"
diff --git a/test_cases/q4/3-one-ghost-4level.test b/test_cases/q4/3-one-ghost-4level.test
new file mode 100644
index 0000000..6490a48
--- /dev/null
+++ b/test_cases/q4/3-one-ghost-4level.test
@@ -0,0 +1,79 @@
+class: "GraphGameTreeTest"
+alg: "ExpectimaxAgent"
+depth: "4"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ / \
+ c1 c2 c3 c4
+ / \ / \ / \ / \
+ d1 d2 d3 d4 d5 d6 d7 d8
+/ \ / \ / \ / \ / \ / \ / \ / \
+A B C D E F G H I J K L M N O P
+3 13 5 9 10 11 6 8 1 0 4 7 12 15 2 14
+
+a - max
+b - exp
+c - max
+d - exp
+"""
+num_agents: "2"
+
+start_state: "a"
+win_states: "A B C D E F G H I J K L M N O P"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Left c3
+b2 Right c4
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+d1 Left A
+d1 Right B
+d2 Left C
+d2 Right D
+d3 Left E
+d3 Right F
+d4 Left G
+d4 Right H
+d5 Left I
+d5 Right J
+d6 Left K
+d6 Right L
+d7 Left M
+d7 Right N
+d8 Left O
+d8 Right P
+"""
+
+evaluation: """
+A 3.0
+B 13.0
+C 5.0
+D 9.0
+E 10.0
+F 11.0
+G 6.0
+H 8.0
+I 1.0
+J 0.0
+K 4.0
+L 7.0
+M 12.0
+N 15.0
+O 2.0
+P 14.0
+"""
diff --git a/test_cases/q4/4-two-ghosts-3level.solution b/test_cases/q4/4-two-ghosts-3level.solution
new file mode 100644
index 0000000..6dbd664
--- /dev/null
+++ b/test_cases/q4/4-two-ghosts-3level.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q4/4-two-ghosts-3level.test.
+action: "Left"
+generated: "a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7 d8"
diff --git a/test_cases/q4/4-two-ghosts-3level.test b/test_cases/q4/4-two-ghosts-3level.test
new file mode 100644
index 0000000..d39bf1c
--- /dev/null
+++ b/test_cases/q4/4-two-ghosts-3level.test
@@ -0,0 +1,52 @@
+class: "GraphGameTreeTest"
+alg: "ExpectimaxAgent"
+depth: "3"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ / \
+ c1 c2 c3 c4
+ / \ / \ / \ / \
+ d1 d2 d3 d4 d5 d6 d7 d8
+ 3 9 10 6 4 7 0 5
+
+a - max
+b - exp
+c - exp
+"""
+num_agents: "3"
+
+start_state: "a"
+win_states: "d1 d2 d3 d4 d5 d6 d7 d8"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Left c3
+b2 Right c4
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+"""
+
+evaluation: """
+d1 3.0
+d2 9.0
+d3 10.0
+d4 6.0
+d5 4.0
+d6 7.0
+d7 0.0
+d8 5.0
+"""
diff --git a/test_cases/q4/5-two-ghosts-4level.solution b/test_cases/q4/5-two-ghosts-4level.solution
new file mode 100644
index 0000000..caccbea
--- /dev/null
+++ b/test_cases/q4/5-two-ghosts-4level.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q4/5-two-ghosts-4level.test.
+action: "Left"
+generated: "A B C D E F G H I J K L M N O P a b1 b2 c1 c2 c3 c4 d1 d2 d3 d4 d5 d6 d7 d8"
diff --git a/test_cases/q4/5-two-ghosts-4level.test b/test_cases/q4/5-two-ghosts-4level.test
new file mode 100644
index 0000000..b2d2733
--- /dev/null
+++ b/test_cases/q4/5-two-ghosts-4level.test
@@ -0,0 +1,79 @@
+class: "GraphGameTreeTest"
+alg: "ExpectimaxAgent"
+depth: "4"
+
+diagram: """
+ /-----a------\
+ / \
+ / \
+ b1 b2
+ / \ / \
+ c1 c2 c3 c4
+ / \ / \ / \ / \
+ d1 d2 d3 d4 d5 d6 d7 d8
+/ \ / \ / \ / \ / \ / \ / \ / \
+A B C D E F G H I J K L M N O P
+3 13 5 9 10 11 6 8 1 0 4 7 12 15 2 14
+
+a - max
+b - exp
+c - exp
+d - max
+"""
+num_agents: "3"
+
+start_state: "a"
+win_states: "A B C D E F G H I J K L M N O P"
+lose_states: ""
+
+successors: """
+a Left b1
+a Right b2
+b1 Left c1
+b1 Right c2
+b2 Left c3
+b2 Right c4
+c1 Left d1
+c1 Right d2
+c2 Left d3
+c2 Right d4
+c3 Left d5
+c3 Right d6
+c4 Left d7
+c4 Right d8
+d1 Left A
+d1 Right B
+d2 Left C
+d2 Right D
+d3 Left E
+d3 Right F
+d4 Left G
+d4 Right H
+d5 Left I
+d5 Right J
+d6 Left K
+d6 Right L
+d7 Left M
+d7 Right N
+d8 Left O
+d8 Right P
+"""
+
+evaluation: """
+A 3.0
+B 13.0
+C 5.0
+D 9.0
+E 10.0
+F 11.0
+G 6.0
+H 8.0
+I 1.0
+J 0.0
+K 4.0
+L 7.0
+M 12.0
+N 15.0
+O 2.0
+P 14.0
+"""
diff --git a/test_cases/q4/6-1a-check-depth-one-ghost.solution b/test_cases/q4/6-1a-check-depth-one-ghost.solution
new file mode 100644
index 0000000..22aebdc
--- /dev/null
+++ b/test_cases/q4/6-1a-check-depth-one-ghost.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q4/6-1a-check-depth-one-ghost.test.
+action: "Left"
+generated: "a b1 b2 b3 c1 c2 c3"
diff --git a/test_cases/q4/6-1a-check-depth-one-ghost.test b/test_cases/q4/6-1a-check-depth-one-ghost.test
new file mode 100644
index 0000000..4d36ea5
--- /dev/null
+++ b/test_cases/q4/6-1a-check-depth-one-ghost.test
@@ -0,0 +1,83 @@
+class: "GraphGameTreeTest"
+alg: "ExpectimaxAgent"
+depth: "1"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 10 c1 0 c2 c3 8
+ | | |
+ 0 d1 0 d2 d3 8
+ | | |
+ 0 e1 10 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ g1 g2 g3
+ 0 0 8
+
+a - max
+b - min
+c - max
+d - min
+e - max
+f - min
+
+At depth 1, the evaluation function is called at level c,
+so Left should be returned. If your algorithm is returning a
+different action, check how you implemented your depth.
+"""
+
+num_agents: "2"
+
+start_state: "a"
+win_states: "g1 g2 g3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 10.0
+c2 0.0
+c3 8.0
+d1 0.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 10.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 0.0
+g3 8.0
+"""
+
+
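The depth convention described in the diagram comment above (one search ply covers one Pacman move plus one move per ghost, so depth 1 with two agents stops at level c) can be sketched as follows. This is an illustrative fragment written under that assumption, not code from the repository:

    # Hypothetical helper: advance to the next agent and bump the ply counter
    # only after the last ghost has moved. With num_agents = 2 and depth 1,
    # the search expands max (a) and min (b) and evaluates at the next max
    # level (c), matching the expected "Left" action in the solution file.
    def next_agent_and_depth(agent_index, depth, num_agents):
        agent_index = (agent_index + 1) % num_agents
        if agent_index == 0:  # back to Pacman: one full ply completed
            depth += 1
        return agent_index, depth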
diff --git a/test_cases/q4/6-1b-check-depth-one-ghost.solution b/test_cases/q4/6-1b-check-depth-one-ghost.solution
new file mode 100644
index 0000000..e34e3d3
--- /dev/null
+++ b/test_cases/q4/6-1b-check-depth-one-ghost.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q4/6-1b-check-depth-one-ghost.test.
+action: "Center"
+generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3"
diff --git a/test_cases/q4/6-1b-check-depth-one-ghost.test b/test_cases/q4/6-1b-check-depth-one-ghost.test
new file mode 100644
index 0000000..a63d167
--- /dev/null
+++ b/test_cases/q4/6-1b-check-depth-one-ghost.test
@@ -0,0 +1,83 @@
+class: "GraphGameTreeTest"
+alg: "ExpectimaxAgent"
+depth: "2"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 10 c1 0 c2 c3 8
+ | | |
+ 0 d1 0 d2 d3 8
+ | | |
+ 0 e1 10 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ g1 g2 g3
+ 0 0 8
+
+a - max
+b - min
+c - max
+d - min
+e - max
+f - min
+
+At depth 2, the evaluation function is called at level e,
+so Center should be returned. If your algorithm is returning a
+different action, check how you implemented your depth.
+"""
+
+num_agents: "2"
+
+start_state: "a"
+win_states: "g1 g2 g3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 10.0
+c2 0.0
+c3 8.0
+d1 0.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 10.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 0.0
+g3 8.0
+"""
+
+
diff --git a/test_cases/q4/6-1c-check-depth-one-ghost.solution b/test_cases/q4/6-1c-check-depth-one-ghost.solution
new file mode 100644
index 0000000..cd33b5a
--- /dev/null
+++ b/test_cases/q4/6-1c-check-depth-one-ghost.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q4/6-1c-check-depth-one-ghost.test.
+action: "Right"
+generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3"
diff --git a/test_cases/q4/6-1c-check-depth-one-ghost.test b/test_cases/q4/6-1c-check-depth-one-ghost.test
new file mode 100644
index 0000000..84e78f0
--- /dev/null
+++ b/test_cases/q4/6-1c-check-depth-one-ghost.test
@@ -0,0 +1,83 @@
+class: "GraphGameTreeTest"
+alg: "ExpectimaxAgent"
+depth: "3"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 10 c1 0 c2 c3 8
+ | | |
+ 0 d1 0 d2 d3 8
+ | | |
+ 0 e1 10 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ g1 g2 g3
+ 0 0 8
+
+a - max
+b - min
+c - max
+d - min
+e - max
+f - min
+
+At depth 3, the evaluation function is called at level g,
+so Right should be returned. If your algorithm is returning a
+different action, check how you implemented your depth.
+"""
+
+num_agents: "2"
+
+start_state: "a"
+win_states: "g1 g2 g3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 10.0
+c2 0.0
+c3 8.0
+d1 0.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 10.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 0.0
+g3 8.0
+"""
+
+
diff --git a/test_cases/q4/6-2a-check-depth-two-ghosts.solution b/test_cases/q4/6-2a-check-depth-two-ghosts.solution
new file mode 100644
index 0000000..0e9c9e9
--- /dev/null
+++ b/test_cases/q4/6-2a-check-depth-two-ghosts.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q4/6-2a-check-depth-two-ghosts.test.
+action: "Left"
+generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3"
diff --git a/test_cases/q4/6-2a-check-depth-two-ghosts.test b/test_cases/q4/6-2a-check-depth-two-ghosts.test
new file mode 100644
index 0000000..36ecd4e
--- /dev/null
+++ b/test_cases/q4/6-2a-check-depth-two-ghosts.test
@@ -0,0 +1,110 @@
+class: "GraphGameTreeTest"
+alg: "ExpectimaxAgent"
+depth: "1"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 0 c1 0 c2 c3 8
+ | | |
+ 10 d1 0 d2 d3 8
+ | | |
+ 0 e1 0 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ 0 g1 10 g2 g3 8
+ | | |
+ 0 h1 0 h2 h3 8
+ | | |
+ 0 i1 0 i2 i3 8
+ | | |
+ j1 j2 j3
+ 0 0 8
+
+a - max
+b - min
+c - min
+d - max
+e - min
+f - min
+g - max
+h - min
+i - min
+
+At depth 1, the evaluation function is called at level d,
+so Left should be returned. If your algorithm is returning a
+different action, check how you implemented your depth.
+"""
+
+num_agents: "3"
+
+start_state: "a"
+win_states: "j1 j2 j3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+g1 Center h1
+g2 Center h2
+g3 Center h3
+h1 Center i1
+h2 Center i2
+h3 Center i3
+i1 Center j1
+i2 Center j2
+i3 Center j3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 0.0
+c2 0.0
+c3 8.0
+d1 10.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 0.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 10.0
+g3 8.0
+h1 0.0
+h2 0.0
+h3 8.0
+i1 0.0
+i2 0.0
+i3 8.0
+j1 0.0
+j2 0.0
+j3 8.0
+"""
+
+
diff --git a/test_cases/q4/6-2b-check-depth-two-ghosts.solution b/test_cases/q4/6-2b-check-depth-two-ghosts.solution
new file mode 100644
index 0000000..3fcde21
--- /dev/null
+++ b/test_cases/q4/6-2b-check-depth-two-ghosts.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q4/6-2b-check-depth-two-ghosts.test.
+action: "Center"
+generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3"
diff --git a/test_cases/q4/6-2b-check-depth-two-ghosts.test b/test_cases/q4/6-2b-check-depth-two-ghosts.test
new file mode 100644
index 0000000..d1b85b7
--- /dev/null
+++ b/test_cases/q4/6-2b-check-depth-two-ghosts.test
@@ -0,0 +1,110 @@
+class: "GraphGameTreeTest"
+alg: "ExpectimaxAgent"
+depth: "2"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 0 c1 0 c2 c3 8
+ | | |
+ 10 d1 0 d2 d3 8
+ | | |
+ 0 e1 0 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ 0 g1 10 g2 g3 8
+ | | |
+ 0 h1 0 h2 h3 8
+ | | |
+ 0 i1 0 i2 i3 8
+ | | |
+ j1 j2 j3
+ 0 0 8
+
+a - max
+b - min
+c - min
+d - max
+e - min
+f - min
+g - max
+h - min
+i - min
+
+At depth 2, the evaluation function is called at level g,
+so Center should be returned. If your algorithm is returning
+a different action, check how you implemented your depth.
+"""
+
+num_agents: "3"
+
+start_state: "a"
+win_states: "j1 j2 j3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+g1 Center h1
+g2 Center h2
+g3 Center h3
+h1 Center i1
+h2 Center i2
+h3 Center i3
+i1 Center j1
+i2 Center j2
+i3 Center j3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 0.0
+c2 0.0
+c3 8.0
+d1 10.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 0.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 10.0
+g3 8.0
+h1 0.0
+h2 0.0
+h3 8.0
+i1 0.0
+i2 0.0
+i3 8.0
+j1 0.0
+j2 0.0
+j3 8.0
+"""
+
+
diff --git a/test_cases/q4/6-2c-check-depth-two-ghosts.solution b/test_cases/q4/6-2c-check-depth-two-ghosts.solution
new file mode 100644
index 0000000..bad1cf9
--- /dev/null
+++ b/test_cases/q4/6-2c-check-depth-two-ghosts.solution
@@ -0,0 +1,3 @@
+# This is the solution file for test_cases/q4/6-2c-check-depth-two-ghosts.test.
+action: "Right"
+generated: "a b1 b2 b3 c1 c2 c3 d1 d2 d3 e1 e2 e3 f1 f2 f3 g1 g2 g3 h1 h2 h3 i1 i2 i3 j1 j2 j3"
diff --git a/test_cases/q4/6-2c-check-depth-two-ghosts.test b/test_cases/q4/6-2c-check-depth-two-ghosts.test
new file mode 100644
index 0000000..6c4c9ea
--- /dev/null
+++ b/test_cases/q4/6-2c-check-depth-two-ghosts.test
@@ -0,0 +1,110 @@
+class: "GraphGameTreeTest"
+alg: "ExpectimaxAgent"
+depth: "3"
+
+diagram: """
+ a
+ /-/ | \--\
+ / | \
+ 0 b1 0 b2 b3 8
+ | | |
+ 0 c1 0 c2 c3 8
+ | | |
+ 10 d1 0 d2 d3 8
+ | | |
+ 0 e1 0 e2 e3 8
+ | | |
+ 0 f1 0 f2 f3 8
+ | | |
+ 0 g1 10 g2 g3 8
+ | | |
+ 0 h1 0 h2 h3 8
+ | | |
+ 0 i1 0 i2 i3 8
+ | | |
+ j1 j2 j3
+ 0 0 8
+
+a - max
+b - min
+c - min
+d - max
+e - min
+f - min
+g - max
+h - min
+i - min
+
+At depth 3, the evaluation function is called at level j,
+so Right should be returned. If your algorithm is returning
+a different action, check how you implemented your depth.
+"""
+
+num_agents: "3"
+
+start_state: "a"
+win_states: "j1 j2 j3"
+lose_states: ""
+
+successors: """
+a Left b1
+a Center b2
+a Right b3
+b1 Center c1
+b2 Center c2
+b3 Center c3
+c1 Center d1
+c2 Center d2
+c3 Center d3
+d1 Center e1
+d2 Center e2
+d3 Center e3
+e1 Center f1
+e2 Center f2
+e3 Center f3
+f1 Center g1
+f2 Center g2
+f3 Center g3
+g1 Center h1
+g2 Center h2
+g3 Center h3
+h1 Center i1
+h2 Center i2
+h3 Center i3
+i1 Center j1
+i2 Center j2
+i3 Center j3
+"""
+
+
+evaluation: """
+b1 0.0
+b2 0.0
+b3 8.0
+c1 0.0
+c2 0.0
+c3 8.0
+d1 10.0
+d2 0.0
+d3 8.0
+e1 0.0
+e2 0.0
+e3 8.0
+f1 0.0
+f2 0.0
+f3 8.0
+g1 0.0
+g2 10.0
+g3 8.0
+h1 0.0
+h2 0.0
+h3 8.0
+i1 0.0
+i2 0.0
+i3 8.0
+j1 0.0
+j2 0.0
+j3 8.0
+"""
+
+
diff --git a/test_cases/q4/7-pacman-game.solution b/test_cases/q4/7-pacman-game.solution
new file mode 100644
index 0000000..ecc544b
--- /dev/null
+++ b/test_cases/q4/7-pacman-game.solution
@@ -0,0 +1,444 @@
+optimalActions: """
+[[["West", "East"], 59], [["West", "East"], 35]]
+[[["West"], 190], [["West"], 127]]
+[[["West"], 190], [["West"], 135]]
+[[["West", "North"], 120], [["West", "North"], 82]]
+[[["West"], 77], [["West"], 57]]
+[[["West", "North"], 143], [["West", "North"], 97]]
+[[["West"], 155], [["West"], 110]]
+[[["West"], 40], [["West"], 27]]
+[[["North"], 64], [["North"], 43]]
+[[["North"], 85], [["North"], 57]]
+[[["North"], 106], [["North"], 71]]
+[[["North"], 97], [["North"], 65]]
+[[["East"], 154], [["East"], 103]]
+[[["Stop"], 130], [["West"], 85]]
+[[["West"], 30], [["West"], 17]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 18], [["East"], 12]]
+[[["North"], 29], [["North"], 18]]
+[[["North"], 50], [["North"], 31]]
+[[["West"], 55], [["West"], 36]]
+[[["East"], 29], [["East"], 16]]
+[[["North"], 89], [["North"], 61]]
+[[["East", "North"], 161], [["East", "North"], 121]]
+[[["East", "North"], 221], [["East", "North"], 166]]
+[[["North", "South"], 105], [["North", "South"], 77]]
+[[["West"], 69], [["West"], 51]]
+[[["West"], 94], [["West"], 69]]
+[[["West", "Stop"], 57], [["West"], 42]]
+[[["West", "Stop", "East"], 69], [["West", "East"], 49]]
+[[["West", "Stop", "East"], 61], [["West", "East"], 41]]
+[[["Stop", "East", "South"], 55], [["East", "South"], 37]]
+[[["Stop", "East", "South"], 28], [["East", "South"], 19]]
+[[["Stop", "East", "South"], 34], [["East", "South"], 23]]
+[[["Stop", "East", "South"], 55], [["East", "South"], 37]]
+[[["Stop", "East", "South"], 55], [["East", "South"], 37]]
+[[["Stop", "East", "South"], 61], [["East", "South"], 41]]
+[[["Stop", "East", "South"], 85], [["East", "South"], 57]]
+[[["Stop", "East", "South"], 64], [["East", "South"], 43]]
+[[["Stop", "East", "South"], 61], [["East", "South"], 41]]
+[[["Stop", "East", "South"], 61], [["East", "South"], 41]]
+[[["Stop", "East", "South"], 85], [["East", "South"], 57]]
+[[["Stop", "East", "South"], 102], [["East", "South"], 67]]
+[[["Stop", "South"], 23], [["South"], 13]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 18], [["East"], 12]]
+[[["East", "North"], 29], [["East", "North"], 18]]
+[[["East"], 38], [["East"], 22]]
+[[["North"], 29], [["North"], 18]]
+[[["North"], 38], [["North"], 22]]
+[[["East"], 33], [["East"], 22]]
+[[["East"], 37], [["East"], 18]]
+[[["East"], 18], [["East"], 12]]
+[[["East"], 37], [["East"], 26]]
+[[["East"], 69], [["East"], 41]]
+[[["East"], 56], [["East"], 26]]
+[[["East"], 44], [["East"], 29]]
+[[["North", "South"], 83], [["North", "South"], 52]]
+[[["East", "North"], 121], [["East", "North"], 74]]
+[[["East", "North"], 97], [["East", "North"], 73]]
+[[["North", "South"], 173], [["North", "South"], 130]]
+[[["West", "East"], 90], [["West", "East"], 66]]
+[[["West", "Stop", "East"], 161], [["West", "East"], 118]]
+[[["Stop", "East", "South"], 58], [["East", "South"], 43]]
+[[["Stop", "East"], 120], [["East"], 85]]
+[[["East"], 78], [["East"], 45]]
+[[["West"], 77], [["West"], 42]]
+[[["South"], 83], [["South"], 48]]
+[[["South"], 49], [["South"], 37]]
+[[["South"], 185], [["South"], 104]]
+[[["South"], 68], [["South"], 41]]
+[[["West"], 30], [["West"], 18]]
+[[["West"], 56], [["West"], 29]]
+[[["West"], 14], [["West"], 10]]
+[[["West"], 20], [["West"], 14]]
+[[["West"], 13], [["West"], 9]]
+[[["West"], 13], [["West"], 9]]
+[[["West"], 16], [["West"], 12]]
+[[["West", "North"], 30], [["West", "North"], 20]]
+[[["West"], 38], [["West"], 23]]
+[[["West", "Stop", "East", "North"], 70], [["West", "East", "North"], 46]]
+[[["West", "Stop", "East"], 128], [["West", "East"], 89]]
+[[["West", "Stop", "East"], 31], [["West", "East"], 20]]
+[[["Stop", "East", "North"], 69], [["East", "North"], 45]]
+[[["Stop", "North"], 58], [["North"], 31]]
+[[["North"], 34], [["North"], 19]]
+[[["North"], 30], [["North"], 17]]
+[[["North"], 19], [["North"], 11]]
+[[["North"], 34], [["North"], 19]]
+[[["East"], 30], [["East"], 17]]
+[[["East"], 19], [["East"], 11]]
+[[["East"], 44], [["East"], 29]]
+[[["East", "South"], 87], [["East", "South"], 60]]
+[[["East", "South"], 108], [["East", "South"], 62]]
+[[["South"], 120], [["South"], 61]]
+[[["North", "South"], 209], [["North", "South"], 132]]
+[[["West"], 108], [["West"], 60]]
+[[["West", "Stop", "East", "South"], 83], [["West", "East", "South"], 61]]
+[[["West", "Stop", "East", "South"], 90], [["West", "East", "South"], 66]]
+[[["West", "Stop", "East"], 134], [["West", "East"], 95]]
+[[["West", "Stop", "East"], 82], [["West", "East"], 55]]
+[[["Stop", "East", "South"], 142], [["East", "South"], 95]]
+[[["Stop", "East", "South"], 98], [["East", "South"], 65]]
+[[["Stop", "East", "South"], 128], [["East", "South"], 86]]
+[[["Stop", "East", "South"], 82], [["East", "South"], 55]]
+[[["Stop", "East", "South"], 85], [["East", "South"], 57]]
+[[["Stop", "East", "South"], 190], [["East", "South"], 127]]
+[[["Stop", "East", "South"], 158], [["East", "South"], 103]]
+[[["Stop", "South"], 50], [["South"], 27]]
+[[["South"], 30], [["South"], 17]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["South"], 15], [["South"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 18], [["East"], 12]]
+[[["East", "North"], 29], [["East", "North"], 18]]
+[[["East"], 37], [["East"], 22]]
+[[["East", "North"], 41], [["East", "North"], 24]]
+[[["East"], 59], [["East"], 29]]
+[[["East"], 19], [["East"], 11]]
+[[["East"], 26], [["East"], 15]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 15], [["East"], 9]]
+[[["East"], 18], [["East"], 12]]
+[[["East"], 29], [["East"], 18]]
+[[["East"], 37], [["East"], 22]]
+[[["East", "North"], 41], [["East", "North"], 24]]
+[[["East"], 59], [["East"], 29]]
+[[["East"], 19], [["East"], 11]]
+[[["North"], 26], [["North"], 15]]
+[[["North"], 19], [["North"], 11]]
+[[["North"], 30], [["North"], 17]]
+[[["North"], 34], [["North"], 19]]
+[[["West"], 34], [["West"], 19]]
+[[["West"], 25], [["West"], 13]]
+[[["West", "Stop", "East"], 7], [["West", "East"], 3]]
+"""
+altDepthActions: """
+[["West", "East"], ["West", "East"], ["West", "East"], ["West", "East"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["West", "Stop"], ["West"], ["Stop"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["West"], ["West"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["East", "North"], ["East", "North"], ["East"], ["East"]]
+[["North", "South"], ["North", "South"], ["North"], ["North"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West", "Stop"], ["West"]]
+[["West"], ["West"], ["West", "Stop", "East", "South"], ["West", "East", "South"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "South"], ["South"], ["Stop", "South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East", "North"], ["East", "North"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["North", "South"], ["North", "South"], ["South"], ["South"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["North", "South"], ["North", "South"], ["North"], ["North"]]
+[["West", "East"], ["West", "East"], ["East"], ["East"]]
+[["West"], ["West"], ["East"], ["East"]]
+[["Stop", "East", "South"], ["East", "South"], ["East"], ["East"]]
+[["Stop", "East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["West", "East"], ["West", "East"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "North"], ["West", "North"], ["West", "North"], ["West", "North"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "Stop", "East", "North"], ["West", "East", "North"], ["West", "Stop", "East", "North"], ["West", "East", "North"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "North"], ["East", "North"], ["Stop", "East", "North"], ["East", "North"]]
+[["Stop", "North"], ["North"], ["Stop", "North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East", "South"], ["East", "South"], ["East", "South"], ["East", "South"]]
+[["East", "South"], ["East", "South"], ["East", "South"], ["East", "South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["North", "South"], ["North", "South"], ["North", "South"], ["North", "South"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "Stop", "East", "South"], ["West", "East", "South"], ["West", "Stop", "East", "South"], ["West", "East", "South"]]
+[["West", "Stop", "East", "South"], ["West", "East", "South"], ["West", "Stop", "East", "South"], ["West", "East", "South"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"], ["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "South"], ["South"], ["Stop", "South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["South"], ["South"], ["South"], ["South"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East", "North"], ["East", "North"], ["East", "North"], ["East", "North"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East", "North"], ["East", "North"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["East"], ["East"], ["East"], ["East"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["North"], ["North"], ["North"], ["North"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West"], ["West"], ["West"], ["West"]]
+[["West", "Stop", "East"], ["West", "East"], ["West", "Stop", "East"], ["West", "East"]]
+"""
+partialPlyBugActions: """
+[["West", "East"], ["West", "East"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West", "North"], ["West", "North"]]
+[["West"], ["West"]]
+[["West", "North"], ["West", "North"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["East"], ["East"]]
+[["Stop"], ["West"]]
+[["West"], ["West"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["West"], ["West"]]
+[["East"], ["East"]]
+[["North"], ["North"]]
+[["East", "North"], ["East", "North"]]
+[["East", "North"], ["East", "North"]]
+[["North", "South"], ["North", "South"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West", "Stop"], ["West"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East", "North"], ["East", "North"]]
+[["East"], ["East"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["North", "South"], ["North", "South"]]
+[["East", "North"], ["East", "North"]]
+[["East", "North"], ["East", "North"]]
+[["North", "South"], ["North", "South"]]
+[["West", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East"], ["East"]]
+[["East"], ["East"]]
+[["West"], ["West"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West", "North"], ["West", "North"]]
+[["West"], ["West"]]
+[["West", "Stop", "East", "North"], ["West", "East", "North"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "North"], ["East", "North"]]
+[["Stop", "North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East", "South"], ["East", "South"]]
+[["East", "South"], ["East", "South"]]
+[["South"], ["South"]]
+[["North", "South"], ["North", "South"]]
+[["West"], ["West"]]
+[["West", "Stop", "East", "South"], ["West", "East", "South"]]
+[["West", "Stop", "East", "South"], ["West", "East", "South"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["West", "Stop", "East"], ["West", "East"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "East", "South"], ["East", "South"]]
+[["Stop", "South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["South"], ["South"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East", "North"], ["East", "North"]]
+[["East"], ["East"]]
+[["East", "North"], ["East", "North"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["East", "North"], ["East", "North"]]
+[["East"], ["East"]]
+[["East"], ["East"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["North"], ["North"]]
+[["West"], ["West"]]
+[["West"], ["West"]]
+[["West", "Stop", "East"], ["West", "East"]]
+"""
diff --git a/test_cases/q4/7-pacman-game.test b/test_cases/q4/7-pacman-game.test
new file mode 100644
index 0000000..435cf2f
--- /dev/null
+++ b/test_cases/q4/7-pacman-game.test
@@ -0,0 +1,19 @@
+class: "PacmanGameTreeTest"
+alg: "ExpectimaxAgent"
+seed: "0"
+depth: "2"
+max_points: "4"
+
+# The following specifies the layout to be used
+layoutName: "smallClassic"
+layout: """
+%%%%%%%%%%%%%%%%%%%%
+%......%G G%......%
+%.%%...%% %%...%%.%
+%.%o.%........%.o%.%
+%.%%.%.%%%%%%.%.%%.%
+%........P.........%
+%%%%%%%%%%%%%%%%%%%%
+"""
+
+
diff --git a/test_cases/q4/CONFIG b/test_cases/q4/CONFIG
new file mode 100644
index 0000000..a5adc3f
--- /dev/null
+++ b/test_cases/q4/CONFIG
@@ -0,0 +1,2 @@
+max_points: "5"
+class: "PassAllTestsQuestion"
diff --git a/test_cases/q5/CONFIG b/test_cases/q5/CONFIG
new file mode 100644
index 0000000..df50d84
--- /dev/null
+++ b/test_cases/q5/CONFIG
@@ -0,0 +1,2 @@
+max_points: "6"
+class: "PartialCreditQuestion"
diff --git a/test_cases/q5/grade-agent.solution b/test_cases/q5/grade-agent.solution
new file mode 100644
index 0000000..55fcfc8
--- /dev/null
+++ b/test_cases/q5/grade-agent.solution
@@ -0,0 +1,2 @@
+# This is the solution file for test_cases/q5/grade-agent.test.
+# File intentionally blank.
diff --git a/test_cases/q5/grade-agent.test b/test_cases/q5/grade-agent.test
new file mode 100644
index 0000000..4109586
--- /dev/null
+++ b/test_cases/q5/grade-agent.test
@@ -0,0 +1,18 @@
+class: "EvalAgentTest"
+
+agentName: "ExpectimaxAgent"
+agentArgs: "evalFn=better"
+layoutName: "smallClassic"
+maxTime: "120"
+numGames: "10"
+
+nonTimeoutMinimum: "0"
+nonTimeoutThresholds: "10"
+
+scoreThresholds: "500 1000"
+
+winsMinimum: "1"
+winsThresholds: "1 5 10"
+
+randomSeed: "0"
+ghosts: "[RandomGhost(1)]"
diff --git a/textDisplay.py b/textDisplay.py
new file mode 100644
index 0000000..e920ad4
--- /dev/null
+++ b/textDisplay.py
@@ -0,0 +1,81 @@
+# textDisplay.py
+# --------------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+import time
+try:
+ import pacman
+except:
+ pass
+
+DRAW_EVERY = 1
+SLEEP_TIME = 0 # This can be overwritten by __init__
+DISPLAY_MOVES = False
+QUIET = False # Suppresses output
+
+class NullGraphics:
+ def initialize(self, state, isBlue = False):
+ pass
+
+ def update(self, state):
+ pass
+
+ def checkNullDisplay(self):
+ return True
+
+ def pause(self):
+ time.sleep(SLEEP_TIME)
+
+ def draw(self, state):
+ print state
+
+ def updateDistributions(self, dist):
+ pass
+
+ def finish(self):
+ pass
+
+class PacmanGraphics:
+ def __init__(self, speed=None):
+ if speed != None:
+ global SLEEP_TIME
+ SLEEP_TIME = speed
+
+ def initialize(self, state, isBlue = False):
+ self.draw(state)
+ self.pause()
+ self.turn = 0
+ self.agentCounter = 0
+
+ def update(self, state):
+ numAgents = len(state.agentStates)
+ self.agentCounter = (self.agentCounter + 1) % numAgents
+ if self.agentCounter == 0:
+ self.turn += 1
+ if DISPLAY_MOVES:
+ ghosts = [pacman.nearestPoint(state.getGhostPosition(i)) for i in range(1, numAgents)]
+ print "%4d) P: %-8s" % (self.turn, str(pacman.nearestPoint(state.getPacmanPosition()))),'| Score: %-5d' % state.score,'| Ghosts:', ghosts
+ if self.turn % DRAW_EVERY == 0:
+ self.draw(state)
+ self.pause()
+ if state._win or state._lose:
+ self.draw(state)
+
+ def pause(self):
+ time.sleep(SLEEP_TIME)
+
+ def draw(self, state):
+ print state
+
+ def finish(self):
+ pass
diff --git a/util.py b/util.py
new file mode 100644
index 0000000..7b0cf93
--- /dev/null
+++ b/util.py
@@ -0,0 +1,653 @@
+# util.py
+# -------
+# Licensing Information: You are free to use or extend these projects for
+# educational purposes provided that (1) you do not distribute or publish
+# solutions, (2) you retain this notice, and (3) you provide clear
+# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
+#
+# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
+# The core projects and autograders were primarily created by John DeNero
+# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
+# Student side autograding was added by Brad Miller, Nick Hay, and
+# Pieter Abbeel (pabbeel@cs.berkeley.edu).
+
+
+import sys
+import inspect
+import heapq, random
+import cStringIO
+
+
+class FixedRandom:
+ def __init__(self):
+ fixedState = (3, (2147483648L, 507801126L, 683453281L, 310439348L, 2597246090L, \
+ 2209084787L, 2267831527L, 979920060L, 3098657677L, 37650879L, 807947081L, 3974896263L, \
+ 881243242L, 3100634921L, 1334775171L, 3965168385L, 746264660L, 4074750168L, 500078808L, \
+ 776561771L, 702988163L, 1636311725L, 2559226045L, 157578202L, 2498342920L, 2794591496L, \
+ 4130598723L, 496985844L, 2944563015L, 3731321600L, 3514814613L, 3362575829L, 3038768745L, \
+ 2206497038L, 1108748846L, 1317460727L, 3134077628L, 988312410L, 1674063516L, 746456451L, \
+ 3958482413L, 1857117812L, 708750586L, 1583423339L, 3466495450L, 1536929345L, 1137240525L, \
+ 3875025632L, 2466137587L, 1235845595L, 4214575620L, 3792516855L, 657994358L, 1241843248L, \
+ 1695651859L, 3678946666L, 1929922113L, 2351044952L, 2317810202L, 2039319015L, 460787996L, \
+ 3654096216L, 4068721415L, 1814163703L, 2904112444L, 1386111013L, 574629867L, 2654529343L, \
+ 3833135042L, 2725328455L, 552431551L, 4006991378L, 1331562057L, 3710134542L, 303171486L, \
+ 1203231078L, 2670768975L, 54570816L, 2679609001L, 578983064L, 1271454725L, 3230871056L, \
+ 2496832891L, 2944938195L, 1608828728L, 367886575L, 2544708204L, 103775539L, 1912402393L, \
+ 1098482180L, 2738577070L, 3091646463L, 1505274463L, 2079416566L, 659100352L, 839995305L, \
+ 1696257633L, 274389836L, 3973303017L, 671127655L, 1061109122L, 517486945L, 1379749962L, \
+ 3421383928L, 3116950429L, 2165882425L, 2346928266L, 2892678711L, 2936066049L, 1316407868L, \
+ 2873411858L, 4279682888L, 2744351923L, 3290373816L, 1014377279L, 955200944L, 4220990860L, \
+ 2386098930L, 1772997650L, 3757346974L, 1621616438L, 2877097197L, 442116595L, 2010480266L, \
+ 2867861469L, 2955352695L, 605335967L, 2222936009L, 2067554933L, 4129906358L, 1519608541L, \
+ 1195006590L, 1942991038L, 2736562236L, 279162408L, 1415982909L, 4099901426L, 1732201505L, \
+ 2934657937L, 860563237L, 2479235483L, 3081651097L, 2244720867L, 3112631622L, 1636991639L, \
+ 3860393305L, 2312061927L, 48780114L, 1149090394L, 2643246550L, 1764050647L, 3836789087L, \
+ 3474859076L, 4237194338L, 1735191073L, 2150369208L, 92164394L, 756974036L, 2314453957L, \
+ 323969533L, 4267621035L, 283649842L, 810004843L, 727855536L, 1757827251L, 3334960421L, \
+ 3261035106L, 38417393L, 2660980472L, 1256633965L, 2184045390L, 811213141L, 2857482069L, \
+ 2237770878L, 3891003138L, 2787806886L, 2435192790L, 2249324662L, 3507764896L, 995388363L, \
+ 856944153L, 619213904L, 3233967826L, 3703465555L, 3286531781L, 3863193356L, 2992340714L, \
+ 413696855L, 3865185632L, 1704163171L, 3043634452L, 2225424707L, 2199018022L, 3506117517L, \
+ 3311559776L, 3374443561L, 1207829628L, 668793165L, 1822020716L, 2082656160L, 1160606415L, \
+ 3034757648L, 741703672L, 3094328738L, 459332691L, 2702383376L, 1610239915L, 4162939394L, \
+ 557861574L, 3805706338L, 3832520705L, 1248934879L, 3250424034L, 892335058L, 74323433L, \
+ 3209751608L, 3213220797L, 3444035873L, 3743886725L, 1783837251L, 610968664L, 580745246L, \
+ 4041979504L, 201684874L, 2673219253L, 1377283008L, 3497299167L, 2344209394L, 2304982920L, \
+ 3081403782L, 2599256854L, 3184475235L, 3373055826L, 695186388L, 2423332338L, 222864327L, \
+ 1258227992L, 3627871647L, 3487724980L, 4027953808L, 3053320360L, 533627073L, 3026232514L, \
+ 2340271949L, 867277230L, 868513116L, 2158535651L, 2487822909L, 3428235761L, 3067196046L, \
+ 3435119657L, 1908441839L, 788668797L, 3367703138L, 3317763187L, 908264443L, 2252100381L, \
+ 764223334L, 4127108988L, 384641349L, 3377374722L, 1263833251L, 1958694944L, 3847832657L, \
+ 1253909612L, 1096494446L, 555725445L, 2277045895L, 3340096504L, 1383318686L, 4234428127L, \
+ 1072582179L, 94169494L, 1064509968L, 2681151917L, 2681864920L, 734708852L, 1338914021L, \
+ 1270409500L, 1789469116L, 4191988204L, 1716329784L, 2213764829L, 3712538840L, 919910444L, \
+ 1318414447L, 3383806712L, 3054941722L, 3378649942L, 1205735655L, 1268136494L, 2214009444L, \
+ 2532395133L, 3232230447L, 230294038L, 342599089L, 772808141L, 4096882234L, 3146662953L, \
+ 2784264306L, 1860954704L, 2675279609L, 2984212876L, 2466966981L, 2627986059L, 2985545332L, \
+ 2578042598L, 1458940786L, 2944243755L, 3959506256L, 1509151382L, 325761900L, 942251521L, \
+ 4184289782L, 2756231555L, 3297811774L, 1169708099L, 3280524138L, 3805245319L, 3227360276L, \
+ 3199632491L, 2235795585L, 2865407118L, 36763651L, 2441503575L, 3314890374L, 1755526087L, \
+ 17915536L, 1196948233L, 949343045L, 3815841867L, 489007833L, 2654997597L, 2834744136L, \
+ 417688687L, 2843220846L, 85621843L, 747339336L, 2043645709L, 3520444394L, 1825470818L, \
+ 647778910L, 275904777L, 1249389189L, 3640887431L, 4200779599L, 323384601L, 3446088641L, \
+ 4049835786L, 1718989062L, 3563787136L, 44099190L, 3281263107L, 22910812L, 1826109246L, \
+ 745118154L, 3392171319L, 1571490704L, 354891067L, 815955642L, 1453450421L, 940015623L, \
+ 796817754L, 1260148619L, 3898237757L, 176670141L, 1870249326L, 3317738680L, 448918002L, \
+ 4059166594L, 2003827551L, 987091377L, 224855998L, 3520570137L, 789522610L, 2604445123L, \
+ 454472869L, 475688926L, 2990723466L, 523362238L, 3897608102L, 806637149L, 2642229586L, \
+ 2928614432L, 1564415411L, 1691381054L, 3816907227L, 4082581003L, 1895544448L, 3728217394L, \
+ 3214813157L, 4054301607L, 1882632454L, 2873728645L, 3694943071L, 1297991732L, 2101682438L, \
+ 3952579552L, 678650400L, 1391722293L, 478833748L, 2976468591L, 158586606L, 2576499787L, \
+ 662690848L, 3799889765L, 3328894692L, 2474578497L, 2383901391L, 1718193504L, 3003184595L, \
+ 3630561213L, 1929441113L, 3848238627L, 1594310094L, 3040359840L, 3051803867L, 2462788790L, \
+ 954409915L, 802581771L, 681703307L, 545982392L, 2738993819L, 8025358L, 2827719383L, \
+ 770471093L, 3484895980L, 3111306320L, 3900000891L, 2116916652L, 397746721L, 2087689510L, \
+ 721433935L, 1396088885L, 2751612384L, 1998988613L, 2135074843L, 2521131298L, 707009172L, \
+ 2398321482L, 688041159L, 2264560137L, 482388305L, 207864885L, 3735036991L, 3490348331L, \
+ 1963642811L, 3260224305L, 3493564223L, 1939428454L, 1128799656L, 1366012432L, 2858822447L, \
+ 1428147157L, 2261125391L, 1611208390L, 1134826333L, 2374102525L, 3833625209L, 2266397263L, \
+ 3189115077L, 770080230L, 2674657172L, 4280146640L, 3604531615L, 4235071805L, 3436987249L, \
+ 509704467L, 2582695198L, 4256268040L, 3391197562L, 1460642842L, 1617931012L, 457825497L, \
+ 1031452907L, 1330422862L, 4125947620L, 2280712485L, 431892090L, 2387410588L, 2061126784L, \
+ 896457479L, 3480499461L, 2488196663L, 4021103792L, 1877063114L, 2744470201L, 1046140599L, \
+ 2129952955L, 3583049218L, 4217723693L, 2720341743L, 820661843L, 1079873609L, 3360954200L, \
+ 3652304997L, 3335838575L, 2178810636L, 1908053374L, 4026721976L, 1793145418L, 476541615L, \
+ 973420250L, 515553040L, 919292001L, 2601786155L, 1685119450L, 3030170809L, 1590676150L, \
+ 1665099167L, 651151584L, 2077190587L, 957892642L, 646336572L, 2743719258L, 866169074L, \
+ 851118829L, 4225766285L, 963748226L, 799549420L, 1955032629L, 799460000L, 2425744063L, \
+ 2441291571L, 1928963772L, 528930629L, 2591962884L, 3495142819L, 1896021824L, 901320159L, \
+ 3181820243L, 843061941L, 3338628510L, 3782438992L, 9515330L, 1705797226L, 953535929L, \
+ 764833876L, 3202464965L, 2970244591L, 519154982L, 3390617541L, 566616744L, 3438031503L, \
+ 1853838297L, 170608755L, 1393728434L, 676900116L, 3184965776L, 1843100290L, 78995357L, \
+ 2227939888L, 3460264600L, 1745705055L, 1474086965L, 572796246L, 4081303004L, 882828851L, \
+ 1295445825L, 137639900L, 3304579600L, 2722437017L, 4093422709L, 273203373L, 2666507854L, \
+ 3998836510L, 493829981L, 1623949669L, 3482036755L, 3390023939L, 833233937L, 1639668730L, \
+ 1499455075L, 249728260L, 1210694006L, 3836497489L, 1551488720L, 3253074267L, 3388238003L, \
+ 2372035079L, 3945715164L, 2029501215L, 3362012634L, 2007375355L, 4074709820L, 631485888L, \
+ 3135015769L, 4273087084L, 3648076204L, 2739943601L, 1374020358L, 1760722448L, 3773939706L, \
+ 1313027823L, 1895251226L, 4224465911L, 421382535L, 1141067370L, 3660034846L, 3393185650L, \
+ 1850995280L, 1451917312L, 3841455409L, 3926840308L, 1397397252L, 2572864479L, 2500171350L, \
+ 3119920613L, 531400869L, 1626487579L, 1099320497L, 407414753L, 2438623324L, 99073255L, \
+ 3175491512L, 656431560L, 1153671785L, 236307875L, 2824738046L, 2320621382L, 892174056L, \
+ 230984053L, 719791226L, 2718891946L, 624L), None)
+ self.random = random.Random()
+ self.random.setstate(fixedState)
+
+"""
+ Data structures useful for implementing SearchAgents
+"""
+
+class Stack:
+ "A container with a last-in-first-out (LIFO) queuing policy."
+ def __init__(self):
+ self.list = []
+
+ def push(self,item):
+ "Push 'item' onto the stack"
+ self.list.append(item)
+
+ def pop(self):
+ "Pop the most recently pushed item from the stack"
+ return self.list.pop()
+
+ def isEmpty(self):
+ "Returns true if the stack is empty"
+ return len(self.list) == 0
+
+class Queue:
+ "A container with a first-in-first-out (FIFO) queuing policy."
+ def __init__(self):
+ self.list = []
+
+ def push(self,item):
+ "Enqueue the 'item' into the queue"
+ self.list.insert(0,item)
+
+ def pop(self):
+ """
+ Dequeue the earliest enqueued item still in the queue. This
+ operation removes the item from the queue.
+ """
+ return self.list.pop()
+
+ def isEmpty(self):
+ "Returns true if the queue is empty"
+ return len(self.list) == 0
+
+class PriorityQueue:
+ """
+ Implements a priority queue data structure. Each inserted item
+ has a priority associated with it and the client is usually interested
+ in quick retrieval of the lowest-priority item in the queue. This
+ data structure allows O(1) access to the lowest-priority item.
+
+ Note that this PriorityQueue does not allow you to change the priority
+ of an item. However, you may insert the same item multiple times with
+ different priorities.
+ """
+ def __init__(self):
+ self.heap = []
+ self.count = 0
+
+ def push(self, item, priority):
+        # Ties in priority are broken by insertion order (self.count); this keeps
+        # the pop order stable and comparable with older results.
+ entry = (priority, self.count, item)
+ # entry = (priority, item)
+ heapq.heappush(self.heap, entry)
+ self.count += 1
+
+ def pop(self):
+ (_, _, item) = heapq.heappop(self.heap)
+ # (_, item) = heapq.heappop(self.heap)
+ return item
+
+ def isEmpty(self):
+ return len(self.heap) == 0
+
+class PriorityQueueWithFunction(PriorityQueue):
+ """
+    Implements a priority queue with the same push/pop signature as the
+    Queue and Stack classes. It is designed as a drop-in replacement for
+    those two classes. The caller has to provide a priority function, which
+ extracts each item's priority.
+ """
+ def __init__(self, priorityFunction):
+ "priorityFunction (item) -> priority"
+ self.priorityFunction = priorityFunction # store the priority function
+ PriorityQueue.__init__(self) # super-class initializer
+
+ def push(self, item):
+ "Adds an item to the queue with priority from the priority function"
+ PriorityQueue.push(self, item, self.priorityFunction(item))
+
+
+def manhattanDistance( xy1, xy2 ):
+ "Returns the Manhattan distance between points xy1 and xy2"
+ return abs( xy1[0] - xy2[0] ) + abs( xy1[1] - xy2[1] )
+
+"""
+ Data structures and functions useful for various course projects
+
+ The search project should not need anything below this line.
+"""
+
+class Counter(dict):
+ """
+ A counter keeps track of counts for a set of keys.
+
+ The counter class is an extension of the standard python
+ dictionary type. It is specialized to have number values
+ (integers or floats), and includes a handful of additional
+ functions to ease the task of counting data. In particular,
+ all keys are defaulted to have value 0. Using a dictionary:
+
+ a = {}
+ print a['test']
+
+ would give an error, while the Counter class analogue:
+
+ >>> a = Counter()
+ >>> print a['test']
+ 0
+
+ returns the default 0 value. Note that to reference a key
+ that you know is contained in the counter,
+ you can still use the dictionary syntax:
+
+ >>> a = Counter()
+ >>> a['test'] = 2
+ >>> print a['test']
+ 2
+
+    This is very useful for counting things without initializing their counts;
+ see for example:
+
+ >>> a['blah'] += 1
+ >>> print a['blah']
+ 1
+
+ The counter also includes additional functionality useful in implementing
+ the classifiers for this assignment. Two counters can be added,
+ subtracted or multiplied together. See below for details. They can
+ also be normalized and their total count and arg max can be extracted.
+ """
+ def __getitem__(self, idx):
+ self.setdefault(idx, 0)
+ return dict.__getitem__(self, idx)
+
+ def incrementAll(self, keys, count):
+ """
+ Increments all elements of keys by the same count.
+
+ >>> a = Counter()
+ >>> a.incrementAll(['one','two', 'three'], 1)
+ >>> a['one']
+ 1
+ >>> a['two']
+ 1
+ """
+ for key in keys:
+ self[key] += count
+
+ def argMax(self):
+ """
+ Returns the key with the highest value.
+ """
+ if len(self.keys()) == 0: return None
+ all = self.items()
+ values = [x[1] for x in all]
+ maxIndex = values.index(max(values))
+ return all[maxIndex][0]
+
+ def sortedKeys(self):
+ """
+ Returns a list of keys sorted by their values. Keys
+ with the highest values will appear first.
+
+ >>> a = Counter()
+ >>> a['first'] = -2
+ >>> a['second'] = 4
+ >>> a['third'] = 1
+ >>> a.sortedKeys()
+ ['second', 'third', 'first']
+ """
+ sortedItems = self.items()
+ sortedItems.sort(key=lambda item: -item[1])
+ return [x[0] for x in sortedItems]
+
+ def totalCount(self):
+ """
+ Returns the sum of counts for all keys.
+ """
+ return sum(self.values())
+
+ def normalize(self):
+ """
+ Edits the counter such that the total count of all
+ keys sums to 1. The ratio of counts for all keys
+ will remain the same. Normalizing a Counter whose total
+ count is 0 leaves it unchanged.
+ """
+ total = float(self.totalCount())
+ if total == 0: return
+ for key in self.keys():
+ self[key] = self[key] / total
+
+ def divideAll(self, divisor):
+ """
+ Divides all counts by divisor
+ """
+ divisor = float(divisor)
+ for key in self:
+ self[key] /= divisor
+
+ def copy(self):
+ """
+ Returns a copy of the counter
+ """
+ return Counter(dict.copy(self))
+
+ def __mul__(self, y ):
+ """
+ Multiplying two counters gives the dot product of their vectors where
+ each unique label is a vector element.
+
+ >>> a = Counter()
+ >>> b = Counter()
+ >>> a['first'] = -2
+ >>> a['second'] = 4
+ >>> b['first'] = 3
+ >>> b['second'] = 5
+ >>> a['third'] = 1.5
+ >>> a['fourth'] = 2.5
+ >>> a * b
+ 14
+ """
+ sum = 0
+ x = self
+ if len(x) > len(y):
+ x,y = y,x
+ for key in x:
+ if key not in y:
+ continue
+ sum += x[key] * y[key]
+ return sum
+
+ def __radd__(self, y):
+ """
+ Adding another counter to a counter increments the current counter
+ by the values stored in the second counter.
+
+ >>> a = Counter()
+ >>> b = Counter()
+ >>> a['first'] = -2
+ >>> a['second'] = 4
+ >>> b['first'] = 3
+ >>> b['third'] = 1
+ >>> a += b
+ >>> a['first']
+ 1
+ """
+ for key, value in y.items():
+ self[key] += value
+
+ def __add__( self, y ):
+ """
+ Adding two counters gives a counter with the union of all keys and
+ counts of the second added to counts of the first.
+
+ >>> a = Counter()
+ >>> b = Counter()
+ >>> a['first'] = -2
+ >>> a['second'] = 4
+ >>> b['first'] = 3
+ >>> b['third'] = 1
+ >>> (a + b)['first']
+ 1
+ """
+ addend = Counter()
+ for key in self:
+ if key in y:
+ addend[key] = self[key] + y[key]
+ else:
+ addend[key] = self[key]
+ for key in y:
+ if key in self:
+ continue
+ addend[key] = y[key]
+ return addend
+
+ def __sub__( self, y ):
+ """
+ Subtracting a counter from another gives a counter with the union of all keys and
+ counts of the second subtracted from counts of the first.
+
+ >>> a = Counter()
+ >>> b = Counter()
+ >>> a['first'] = -2
+ >>> a['second'] = 4
+ >>> b['first'] = 3
+ >>> b['third'] = 1
+ >>> (a - b)['first']
+ -5
+ """
+ addend = Counter()
+ for key in self:
+ if key in y:
+ addend[key] = self[key] - y[key]
+ else:
+ addend[key] = self[key]
+ for key in y:
+ if key in self:
+ continue
+ addend[key] = -1 * y[key]
+ return addend
+
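+# Illustrative sketch of Counter arithmetic with hypothetical keys: counts can
+# be totalled, queried for their arg max, and normalized in place.
+#
+#   >>> c = Counter()
+#   >>> c['x'] = 2
+#   >>> c['y'] = 6
+#   >>> c.totalCount()
+#   8
+#   >>> c.argMax()
+#   'y'
+#   >>> c.normalize()
+#   >>> c['x']
+#   0.25
+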
+def raiseNotDefined():
+ fileName = inspect.stack()[1][1]
+ line = inspect.stack()[1][2]
+ method = inspect.stack()[1][3]
+
+ print "*** Method not implemented: %s at line %s of %s" % (method, line, fileName)
+ sys.exit(1)
+
+def normalize(vectorOrCounter):
+ """
+ normalize a vector or counter by dividing each value by the sum of all values
+ """
+ normalizedCounter = Counter()
+ if type(vectorOrCounter) == type(normalizedCounter):
+ counter = vectorOrCounter
+ total = float(counter.totalCount())
+ if total == 0: return counter
+ for key in counter.keys():
+ value = counter[key]
+ normalizedCounter[key] = value / total
+ return normalizedCounter
+ else:
+ vector = vectorOrCounter
+ s = float(sum(vector))
+ if s == 0: return vector
+ return [el / s for el in vector]
+
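+# Illustrative sketch: plain lists are rescaled so that their entries sum to 1.
+#
+#   >>> normalize([1, 1, 2])
+#   [0.25, 0.25, 0.5]
+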
+def nSample(distribution, values, n):
+ if sum(distribution) != 1:
+ distribution = normalize(distribution)
+ rand = [random.random() for i in range(n)]
+ rand.sort()
+ samples = []
+ samplePos, distPos, cdf = 0,0, distribution[0]
+ while samplePos < n:
+ if rand[samplePos] < cdf:
+ samplePos += 1
+ samples.append(values[distPos])
+ else:
+ distPos += 1
+ cdf += distribution[distPos]
+ return samples
+
+def sample(distribution, values = None):
+ if type(distribution) == Counter:
+ items = sorted(distribution.items())
+ distribution = [i[1] for i in items]
+ values = [i[0] for i in items]
+ if sum(distribution) != 1:
+ distribution = normalize(distribution)
+ choice = random.random()
+ i, total= 0, distribution[0]
+ while choice > total:
+ i += 1
+ total += distribution[i]
+ return values[i]
+
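+# Illustrative sketch: draw a single value from a discrete distribution. The
+# weights need not sum to 1 (they are normalized first), and the result is
+# random, so no fixed output is shown.
+#
+#   roll = sample([2, 1, 1], ['a', 'b', 'c'])   # 'a' about half of the time
+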
+def sampleFromCounter(ctr):
+ items = sorted(ctr.items())
+ return sample([v for k,v in items], [k for k,v in items])
+
+def getProbability(value, distribution, values):
+ """
+ Gives the probability of a value under a discrete distribution
+ defined by (distribution, values).
+ """
+ total = 0.0
+ for prob, val in zip(distribution, values):
+ if val == value:
+ total += prob
+ return total
+
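+# Illustrative sketch: probabilities of repeated values are summed.
+#
+#   >>> getProbability('a', [0.3, 0.2, 0.5], ['a', 'b', 'a'])
+#   0.8
+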
+def flipCoin( p ):
+ r = random.random()
+ return r < p
+
+def chooseFromDistribution( distribution ):
+ "Takes either a counter or a list of (prob, key) pairs and samples"
+ if type(distribution) == dict or type(distribution) == Counter:
+ return sample(distribution)
+ r = random.random()
+ base = 0.0
+ for prob, element in distribution:
+ base += prob
+ if r <= base: return element
+
+def nearestPoint( pos ):
+ """
+ Finds the nearest grid point to a position (discretizes).
+ """
+ ( current_row, current_col ) = pos
+
+ grid_row = int( current_row + 0.5 )
+ grid_col = int( current_col + 0.5 )
+ return ( grid_row, grid_col )
+
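+# Illustrative sketch: each coordinate is rounded to the nearest grid integer.
+#
+#   >>> nearestPoint((2.3, 4.7))
+#   (2, 5)
+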
+def sign( x ):
+ """
+ Returns 1 if x is non-negative, otherwise -1.
+ """
+ if( x >= 0 ):
+ return 1
+ else:
+ return -1
+
+def arrayInvert(array):
+ """
+ Transposes a matrix stored as a list of lists (despite the name,
+ this is a transpose, not a matrix inverse).
+ """
+ result = [[] for i in array[0]]
+ for outer in array:
+ for inner in range(len(outer)):
+ result[inner].append(outer[inner])
+ return result
+
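+# Illustrative sketch: rows become columns.
+#
+#   >>> arrayInvert([[1, 2], [3, 4]])
+#   [[1, 3], [2, 4]]
+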
+def matrixAsList( matrix, value = True ):
+ """
+ Turns a matrix into a list of coordinates matching the specified value
+ """
+ rows, cols = len( matrix ), len( matrix[0] )
+ cells = []
+ for row in range( rows ):
+ for col in range( cols ):
+ if matrix[row][col] == value:
+ cells.append( ( row, col ) )
+ return cells
+
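+# Illustrative sketch: collect the (row, col) coordinates holding a given value.
+#
+#   >>> matrixAsList([[True, False], [False, True]])
+#   [(0, 0), (1, 1)]
+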
+def lookup(name, namespace):
+ """
+ Get a method or class from any imported module from its name.
+ Usage: lookup(functionName, globals())
+ """
+ dots = name.count('.')
+ if dots > 0:
+ moduleName, objName = '.'.join(name.split('.')[:-1]), name.split('.')[-1]
+ module = __import__(moduleName)
+ return getattr(module, objName)
+ else:
+ modules = [obj for obj in namespace.values() if str(type(obj)) == "<type 'module'>"]
+ options = [getattr(module, name) for module in modules if name in dir(module)]
+ options += [obj[1] for obj in namespace.items() if obj[0] == name ]
+ if len(options) == 1: return options[0]
+ if len(options) > 1: raise Exception, 'Name conflict for %s' % name
+ raise Exception, '%s not found as a method or class' % name
+
+def pause():
+ """
+ Pauses the output stream awaiting user feedback.
+ """
+ print "<Press enter/return to continue>"
+ raw_input()
+
+
+# Code to handle timeouts.
+#
+# NOTE: TimeoutFunction is NOT reentrant. Later timeouts will silently
+# disable earlier timeouts. This could be solved by maintaining a global
+# list of active timeouts. Currently, questions whose test cases call this
+# wrap all of the student code in it.
+#
+import signal
+import time
+class TimeoutFunctionException(Exception):
+ """Exception to raise on a timeout"""
+ pass
+
+
+class TimeoutFunction:
+ def __init__(self, function, timeout):
+ self.timeout = timeout
+ self.function = function
+
+ def handle_timeout(self, signum, frame):
+ raise TimeoutFunctionException()
+
+ def __call__(self, *args, **keyArgs):
+ # If we have SIGALRM signal, use it to cause an exception if and
+ # when this function runs too long. Otherwise check the time taken
+ # after the method has returned, and throw an exception then.
+ if hasattr(signal, 'SIGALRM'):
+ old = signal.signal(signal.SIGALRM, self.handle_timeout)
+ signal.alarm(self.timeout)
+ try:
+ result = self.function(*args, **keyArgs)
+ finally:
+ signal.signal(signal.SIGALRM, old)
+ signal.alarm(0)
+ else:
+ startTime = time.time()
+ result = self.function(*args, **keyArgs)
+ timeElapsed = time.time() - startTime
+ if timeElapsed >= self.timeout:
+ self.handle_timeout(None, None)
+ return result
+
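+# Illustrative usage sketch (slowFunction is hypothetical): wrap a callable
+# with a time budget in seconds; TimeoutFunctionException is raised if the
+# call overruns (immediately where SIGALRM is available, otherwise after the
+# call returns).
+#
+#   def slowFunction():
+#       time.sleep(5)
+#
+#   try:
+#       TimeoutFunction(slowFunction, 2)()
+#   except TimeoutFunctionException:
+#       pass  # the call exceeded its 2-second budget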
+
+
+_ORIGINAL_STDOUT = None
+_ORIGINAL_STDERR = None
+_MUTED = False
+
+class WritableNull:
+ def write(self, string):
+ pass
+
+def mutePrint():
+ global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
+ if _MUTED:
+ return
+ _MUTED = True
+
+ _ORIGINAL_STDOUT = sys.stdout
+ #_ORIGINAL_STDERR = sys.stderr
+ sys.stdout = WritableNull()
+ #sys.stderr = WritableNull()
+
+def unmutePrint():
+ global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
+ if not _MUTED:
+ return
+ _MUTED = False
+
+ sys.stdout = _ORIGINAL_STDOUT
+ #sys.stderr = _ORIGINAL_STDERR
+