/AI/reinforcement/qlearningAgents.py

https://bitbucket.org/MrOrz/homework · Python

# qlearningAgents.py
# ------------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html

from game import *
from learningAgents import ReinforcementAgent
from featureExtractors import *

import random, util, math

INF = float('inf')


class QLearningAgent(ReinforcementAgent):
    """
    Q-Learning Agent

    Functions you should fill in:
      - getQValue
      - getAction
      - getValue
      - getPolicy
      - update

    Instance variables you have access to:
      - self.epsilon (exploration prob)
      - self.alpha (learning rate)
      - self.gamma (discount rate)

    Functions you should use:
      - self.getLegalActions(state)
        which returns legal actions for a state
    """
    def __init__(self, **args):
        "You can initialize Q-values here..."
        ReinforcementAgent.__init__(self, **args)
        #: Q-values, keyed by (state, action) tuples; unseen pairs default to 0.
        self.q = util.Counter()
        "*** YOUR CODE HERE ***"

    def getQValue(self, state, action):
        """
        Returns Q(state,action).
        Should return 0.0 if we have never seen a state
        or (state,action) tuple.
        """
        "*** YOUR CODE HERE ***"
        return self.q[(state, action)]
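
    # Illustrative note (not part of the assignment code): util.Counter behaves
    # like a dictionary whose missing keys default to 0, so the bare lookup above
    # already satisfies "return 0.0 for unseen (state, action) pairs".  Assuming a
    # hypothetical hashable state 's':
    #
    #     agent.q[('s', 'North')] = 1.5
    #     agent.getQValue('s', 'North')   # -> 1.5
    #     agent.getQValue('s', 'South')   # -> 0 (never updated)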

    def getValue(self, state):
        """
        Returns max_action Q(state,action)
        where the max is over legal actions.  Note that if
        there are no legal actions, which is the case at the
        terminal state, you should return a value of 0.0.
        """
        legalActions = self.getLegalActions(state)
        if len(legalActions) == 0:
            return 0.0
        maxQValue = -INF
        for a in legalActions:
            q = self.getQValue(state, a)
            if q > maxQValue:
                maxQValue = q
        return maxQValue
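
    # Example with hypothetical Q-values: if North and South are the only legal
    # actions in s, with Q(s, North) = 1.0 and Q(s, South) = -0.5, getValue(s)
    # returns 1.0; at a terminal state, where getLegalActions(state) is empty,
    # it returns 0.0.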

    def getPolicy(self, state):
        """
        Compute the best action to take in a state.  Note that if there
        are no legal actions, which is the case at the terminal state,
        you should return None.
        """
        "*** YOUR CODE HERE ***"
        legalActions = self.getLegalActions(state)
        if len(legalActions) == 0:
            return None

        #: List of best choices so far, stored as (qValue, action) tuples.
        #: The sentinel (-INF,) is always replaced on the first iteration,
        #: since every real Q-value is finite and therefore greater than -INF.
        bestChoices = [(-INF, )]
        for a in legalActions:
            q_s_a = self.getQValue(state, a)
            if q_s_a > bestChoices[0][0]:    #: a strictly better choice is found
                bestChoices = [(q_s_a, a)]
            elif q_s_a == bestChoices[0][0]: #: a choice as good as the current best
                bestChoices.append((q_s_a, a))
        #: Break ties between equally good actions at random.
        return random.choice(bestChoices)[1]
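
    # Illustrative note: random tie-breaking matters early in training, when most
    # Q-values are still 0.0.  For example, if Q(s, North) = Q(s, East) = 0.0 and
    # Q(s, South) = -1.0, bestChoices ends up as [(0.0, North), (0.0, East)] and
    # getPolicy(s) returns North or East with equal probability.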

    def getAction(self, state):
        """
        Compute the action to take in the current state.  With
        probability self.epsilon, we should take a random action and
        take the best policy action otherwise.  Note that if there are
        no legal actions, which is the case at the terminal state, you
        should choose None as the action.

        HINT: You might want to use util.flipCoin(prob)
        HINT: To pick randomly from a list, use random.choice(list)
        """
        # Pick Action
        "*** YOUR CODE HERE ***"
        if util.flipCoin(self.epsilon):  #: explore: pick a random legal action
            legalActions = self.getLegalActions(state)
            if len(legalActions) == 0:
                return None
            return random.choice(legalActions)
        else:                            #: exploit: follow the greedy policy
            return self.getPolicy(state)
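
    # Illustrative note (epsilon-greedy): util.flipCoin(p) returns True with
    # probability p, so with the PacmanQAgent default epsilon = 0.05 roughly one
    # move in twenty is chosen uniformly at random from the legal actions, and
    # the rest follow getPolicy.  In the terminal state both branches return
    # None, since getPolicy returns None when there are no legal actions.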

    def update(self, state, action, nextState, reward):
        """
        The parent class calls this to observe a
        state = action => nextState and reward transition.
        You should do your Q-Value update here.

        NOTE: You should never call this function,
        it will be called on your behalf.
        """
        "*** YOUR CODE HERE ***"
        q_s_a = self.q[(state, action)]
        self.q[(state, action)] = q_s_a + \
            self.alpha * (reward + self.gamma * self.getValue(nextState) - q_s_a)
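
    # Illustrative note: the assignment above is the standard temporal-difference
    # update
    #
    #     Q(s, a) <- Q(s, a) + alpha * [r + gamma * max_a' Q(s', a') - Q(s, a)]
    #
    # where getValue(nextState) supplies max_a' Q(s', a').  A worked example with
    # hypothetical numbers: if Q(s, a) = 0, reward = -1, gamma = 0.8, alpha = 0.2
    # and getValue(nextState) = 2, the new estimate is
    # 0 + 0.2 * (-1 + 0.8 * 2 - 0) = 0.12.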


class PacmanQAgent(QLearningAgent):
    "Exactly the same as QLearningAgent, but with different default parameters"

    def __init__(self, epsilon=0.05, gamma=0.8, alpha=0.2, numTraining=0, **args):
        """
        These default parameters can be changed from the pacman.py command line.
        For example, to change the exploration rate, try:

            python pacman.py -p PacmanQLearningAgent -a epsilon=0.1

        alpha       - learning rate
        epsilon     - exploration rate
        gamma       - discount factor
        numTraining - number of training episodes, i.e. no learning after these many episodes
        """
        args['epsilon'] = epsilon
        args['gamma'] = gamma
        args['alpha'] = alpha
        args['numTraining'] = numTraining
        self.index = 0  # This is always Pacman
        QLearningAgent.__init__(self, **args)
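
    # Illustrative note (assumed from the -a syntax shown in the docstring above,
    # not defined in this file): the CS188 pacman.py harness accepts several
    # comma-separated key=value pairs after -a, so a command along the lines of
    #
    #     python pacman.py -p PacmanQLearningAgent -a epsilon=0.1,alpha=0.3,gamma=0.7
    #
    # would override all three parameters at once.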

    def getAction(self, state):
        """
        Simply calls the getAction method of QLearningAgent and then
        informs parent of action for Pacman.  Do not change or remove this
        method.
        """
        action = QLearningAgent.getAction(self, state)
        self.doAction(state, action)
        return action


class ApproximateQAgent(PacmanQAgent):
    """
    ApproximateQLearningAgent

    You should only have to overwrite getQValue
    and update.  All other QLearningAgent functions
    should work as is.
    """
    def __init__(self, extractor='IdentityExtractor', **args):
        self.featExtractor = util.lookup(extractor, globals())()
        PacmanQAgent.__init__(self, **args)

        # You might want to initialize weights here.
        "*** YOUR CODE HERE ***"
        #: Feature weights, keyed by feature name; unseen features default to 0.
        self.w = util.Counter()

    def getQValue(self, state, action):
        """
        Should return Q(state,action) = w * featureVector,
        where * is the dotProduct operator.
        """
        "*** YOUR CODE HERE ***"
        return self.w * self.featExtractor.getFeatures(state, action)
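
    # Illustrative note: util.Counter overloads * as a dot product, so the return
    # value is sum_i w_i * f_i(state, action).  With hypothetical features
    # f = {'bias': 1.0, 'ghost-nearby': 1.0} and weights
    # w = {'bias': 2.0, 'ghost-nearby': -5.0}, Q(s, a) = 2.0 - 5.0 = -3.0.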

    def update(self, state, action, nextState, reward):
        """
        Should update your weights based on transition.
        """
        "*** YOUR CODE HERE ***"
        f = self.featExtractor.getFeatures(state, action)
        #: Compute the correction once, before any weight changes; recomputing it
        #: inside the loop would mix old and partially-updated weights.
        correction = reward + self.gamma * self.getValue(nextState) - self.getQValue(state, action)
        for (i, fi) in f.items():
            self.w[i] = self.w[i] + self.alpha * correction * fi
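
    # Illustrative note: this is the usual linear function-approximation update
    #
    #     w_i <- w_i + alpha * [r + gamma * max_a' Q(s', a') - Q(s, a)] * f_i(s, a),
    #
    # i.e. the same TD error as in the tabular agent, distributed over the
    # features active in (s, a).  Features with f_i = 0 leave their weights
    # unchanged.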

    def final(self, state):
        "Called at the end of each game."
        # Call the super-class final method.
        PacmanQAgent.final(self, state)

        # Did we finish training?
        if self.episodesSoFar == self.numTraining:
            # You might want to print your weights here for debugging.
            "*** YOUR CODE HERE ***"
            pass