
/source/web.py-0.36/build/lib.linux-i686-2.6/web/browser.py

https://bitbucket.org/synl0rd/upt_tik_itenas
  1. """Browser to test web applications.
  2. (from web.py)
  3. """
  4. from utils import re_compile
  5. from net import htmlunquote
  6. import httplib, urllib, urllib2
  7. import copy
  8. from StringIO import StringIO
  9. DEBUG = False
  10. __all__ = [
  11. "BrowserError",
  12. "Browser", "AppBrowser",
  13. "AppHandler"
  14. ]
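
# Note: this module is Python 2 code (urllib2, StringIO, print statements) and
# relies on the third-party BeautifulSoup and ClientForm packages for the HTML
# parsing and form handling helpers below.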

class BrowserError(Exception):
    pass

class Browser:
    def __init__(self):
        import cookielib
        self.cookiejar = cookielib.CookieJar()
        self._cookie_processor = urllib2.HTTPCookieProcessor(self.cookiejar)
        self.form = None

        self.url = "http://0.0.0.0:8080/"
        self.path = "/"

        self.status = None
        self.data = None
        self._response = None
        self._forms = None

    def reset(self):
        """Clears all cookies and history."""
        self.cookiejar.clear()

    def build_opener(self):
        """Builds the opener using urllib2.build_opener.
        Subclasses can override this function to provide custom openers.
        """
        return urllib2.build_opener()
    def do_request(self, req):
        if DEBUG:
            print 'requesting', req.get_method(), req.get_full_url()
        opener = self.build_opener()
        opener.add_handler(self._cookie_processor)
        try:
            self._response = opener.open(req)
        except urllib2.HTTPError, e:
            # an HTTPError is itself a response object; keep it so that
            # status and data reflect the error page
            self._response = e

        self.url = self._response.geturl()
        self.path = urllib2.Request(self.url).get_selector()
        self.data = self._response.read()
        self.status = self._response.code
        self._forms = None
        self.form = None

        return self.get_response()

    def open(self, url, data=None, headers={}):
        """Opens the specified url."""
        url = urllib.basejoin(self.url, url)
        req = urllib2.Request(url, data, headers)
        return self.do_request(req)

    def show(self):
        """Opens the current page in real web browser."""
        f = open('page.html', 'w')
        f.write(self.data)
        f.close()

        import webbrowser, os
        url = 'file://' + os.path.abspath('page.html')
        webbrowser.open(url)
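    # The body of the underlying urllib2 response is consumed into self.data
    # by do_request(), so get_response() rebuilds a fresh file-like response
    # around that cached data for callers such as ClientForm.ParseResponse.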
    def get_response(self):
        """Returns a copy of the current response."""
        return urllib.addinfourl(StringIO(self.data), self._response.info(), self._response.geturl())

    def get_soup(self):
        """Returns beautiful soup of the current document."""
        import BeautifulSoup
        return BeautifulSoup.BeautifulSoup(self.data)

    def get_text(self, e=None):
        """Returns content of e or the current document as plain text."""
        e = e or self.get_soup()
        return ''.join([htmlunquote(c) for c in e.recursiveChildGenerator() if isinstance(c, unicode)])

    def _get_links(self):
        soup = self.get_soup()
        return [a for a in soup.findAll(name='a')]

    def get_links(self, text=None, text_regex=None, url=None, url_regex=None, predicate=None):
        """Returns all links in the document."""
        return self._filter_links(self._get_links(),
            text=text, text_regex=text_regex, url=url, url_regex=url_regex, predicate=predicate)

    def follow_link(self, link=None, text=None, text_regex=None, url=None, url_regex=None, predicate=None):
        if link is None:
            links = self._filter_links(self.get_links(),
                text=text, text_regex=text_regex, url=url, url_regex=url_regex, predicate=predicate)
            link = links and links[0]

        if link:
            return self.open(link['href'])
        else:
            raise BrowserError("No link found")

    def find_link(self, text=None, text_regex=None, url=None, url_regex=None, predicate=None):
        links = self._filter_links(self.get_links(),
            text=text, text_regex=text_regex, url=url, url_regex=url_regex, predicate=predicate)
        return links and links[0] or None
    def _filter_links(self, links,
            text=None, text_regex=None,
            url=None, url_regex=None,
            predicate=None):
        predicates = []
        if text is not None:
            predicates.append(lambda link: link.string == text)
        if text_regex is not None:
            predicates.append(lambda link: re_compile(text_regex).search(link.string or ''))
        if url is not None:
            predicates.append(lambda link: link.get('href') == url)
        if url_regex is not None:
            predicates.append(lambda link: re_compile(url_regex).search(link.get('href', '')))
        if predicate:
            predicates.append(predicate)

        def f(link):
            for p in predicates:
                if not p(link):
                    return False
            return True

        return [link for link in links if f(link)]
    def get_forms(self):
        """Returns all forms in the current document.
        The returned form objects implement the ClientForm.HTMLForm interface.
        """
        if self._forms is None:
            import ClientForm
            self._forms = ClientForm.ParseResponse(self.get_response(), backwards_compat=False)
        return self._forms

    def select_form(self, name=None, predicate=None, index=0):
        """Selects the specified form."""
        forms = self.get_forms()

        if name is not None:
            forms = [f for f in forms if f.name == name]
        if predicate:
            forms = [f for f in forms if predicate(f)]

        if forms:
            self.form = forms[index]
            return self.form
        else:
            raise BrowserError("No form selected.")

    def submit(self, **kw):
        """submits the currently selected form."""
        if self.form is None:
            raise BrowserError("No form selected.")
        req = self.form.click(**kw)
        return self.do_request(req)

    def __getitem__(self, key):
        return self.form[key]

    def __setitem__(self, key, value):
        self.form[key] = value
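
# A hypothetical illustration (not part of the original module): the plain
# Browser class can also drive a live site over HTTP, e.g.
#
#     b = Browser()
#     b.open('http://example.com/')
#     print b.status, b.path
#     for link in b.get_links():
#         print link.get('href')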

class AppBrowser(Browser):
    """Browser interface to test web.py apps.

        b = AppBrowser(app)
        b.open('/')
        b.follow_link(text='Login')

        b.select_form(name='login')
        b['username'] = 'joe'
        b['password'] = 'secret'
        b.submit()

        assert b.path == '/'
        assert 'Welcome joe' in b.get_text()
    """
    def __init__(self, app):
        Browser.__init__(self)
        self.app = app

    def build_opener(self):
        return urllib2.build_opener(AppHandler(self.app))
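
# AppHandler plugs into urllib2's handler chain: instead of opening a network
# connection, it feeds each request straight into web.py's app.request(), so
# AppBrowser can exercise an application without starting a server.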
class AppHandler(urllib2.HTTPHandler):
    """urllib2 handler to handle requests using web.py application."""
    handler_order = 100

    def __init__(self, app):
        self.app = app

    def http_open(self, req):
        result = self.app.request(
            localpart=req.get_selector(),
            method=req.get_method(),
            host=req.get_host(),
            data=req.get_data(),
            headers=dict(req.header_items()),
            https=req.get_type() == "https"
        )
        return self._make_response(result, req.get_full_url())

    def https_open(self, req):
        return self.http_open(req)

    try:
        https_request = urllib2.HTTPHandler.do_request_
    except AttributeError:
        # for python 2.3
        pass

    def _make_response(self, result, url):
        data = "\r\n".join(["%s: %s" % (k, v) for k, v in result.header_items])
        headers = httplib.HTTPMessage(StringIO(data))
        response = urllib.addinfourl(StringIO(result.data), headers, url)
        code, msg = result.status.split(None, 1)
        response.code, response.msg = int(code), msg
        return response
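
# A minimal end-to-end sketch (not part of the original module), assuming
# web.py is installed as the `web` package; the /hello URL and the Greet
# class are hypothetical, used only to show AppBrowser exercising an app
# in-process, without a running server.
if __name__ == "__main__":
    import web

    class Greet:
        def GET(self):
            return "Welcome joe"

    # map /hello to the Greet class above
    app = web.application(("/hello", "Greet"), {"Greet": Greet})

    b = AppBrowser(app)
    b.open('/hello')
    assert b.status == 200
    assert 'Welcome joe' in b.data
    print 'AppBrowser smoke test passed'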