
/django/utils/http.py

https://github.com/insane/django
Python | 263 lines
Possible License(s): BSD-3-Clause
from __future__ import unicode_literals

import base64
import calendar
import datetime
import re
import sys

try:
    from urllib import parse as urllib_parse
except ImportError:     # Python 2
    import urllib as urllib_parse
    import urlparse
    urllib_parse.urlparse = urlparse.urlparse

from binascii import Error as BinasciiError
from email.utils import formatdate

from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils import six

ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')

MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
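# Illustrative samples of the three date formats the patterns above accept
# (the dates are the examples given in RFC 2616 section 3.3.1); added for
# clarity, not part of the original file:
#   RFC1123_DATE matches 'Sun, 06 Nov 1994 08:49:37 GMT'
#   RFC850_DATE  matches 'Sunday, 06-Nov-94 08:49:37 GMT'
#   ASCTIME_DATE matches 'Sun Nov  6 08:49:37 1994'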
def urlquote(url, safe='/'):
    """
    A version of Python's urllib.quote() function that can operate on unicode
    strings. The url is first UTF-8 encoded before quoting. The returned string
    can safely be used as part of an argument to a subsequent iri_to_uri() call
    without double-quoting occurring.
    """
    return force_text(urllib_parse.quote(force_str(url), force_str(safe)))
urlquote = allow_lazy(urlquote, six.text_type)

def urlquote_plus(url, safe=''):
    """
    A version of Python's urllib.quote_plus() function that can operate on
    unicode strings. The url is first UTF-8 encoded before quoting. The
    returned string can safely be used as part of an argument to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    return force_text(urllib_parse.quote_plus(force_str(url), force_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)

def urlunquote(quoted_url):
    """
    A wrapper for Python's urllib.unquote() function that can operate on
    the result of django.utils.http.urlquote().
    """
    return force_text(urllib_parse.unquote(force_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, six.text_type)

def urlunquote_plus(quoted_url):
    """
    A wrapper for Python's urllib.unquote_plus() function that can operate on
    the result of django.utils.http.urlquote_plus().
    """
    return force_text(urllib_parse.unquote_plus(force_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
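# Illustrative round-trip (added for clarity, not part of the original file);
# the percent-encoded forms below assume UTF-8 encoding of the input:
#   urlquote('Paris & Orléans')               -> 'Paris%20%26%20Orl%C3%A9ans'
#   urlunquote('Paris%20%26%20Orl%C3%A9ans')  -> 'Paris & Orléans'
#   urlquote_plus('Paris & Orléans')          -> 'Paris+%26+Orl%C3%A9ans'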
def urlencode(query, doseq=0):
    """
    A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings and
    then encoded as per normal.
    """
    if isinstance(query, MultiValueDict):
        query = query.lists()
    elif hasattr(query, 'items'):
        query = query.items()
    return urllib_parse.urlencode(
        [(force_str(k),
          [force_str(i) for i in v] if isinstance(v, (list, tuple)) else force_str(v))
            for k, v in query],
        doseq)
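# Illustrative usage (added, not part of the original file): with a plain
# dict the value is encoded directly; with doseq=1 a list value becomes
# repeated keys:
#   urlencode({'q': 'café'})                               -> 'q=caf%C3%A9'
#   urlencode(MultiValueDict({'a': ['1', '2']}), doseq=1)  -> 'a=1&a=2'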
def cookie_date(epoch_seconds=None):
    """
    Formats the time to ensure compatibility with Netscape's cookie standard.

    Accepts a floating point number expressed in seconds since the epoch, in
    UTC - such as that outputted by time.time(). If set to None, defaults to
    the current time.

    Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
    """
    rfcdate = formatdate(epoch_seconds)
    return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])

def http_date(epoch_seconds=None):
    """
    Formats the time to match the RFC1123 date format as specified by HTTP
    RFC2616 section 3.3.1.

    Accepts a floating point number expressed in seconds since the epoch, in
    UTC - such as that outputted by time.time(). If set to None, defaults to
    the current time.

    Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
    """
    rfcdate = formatdate(epoch_seconds)
    return '%s GMT' % rfcdate[:25]
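# Example output shapes (illustrative, not part of the original file):
#   http_date(0)    -> 'Thu, 01 Jan 1970 00:00:00 GMT'
#   cookie_date(0)  -> 'Thu, 01-Jan-1970 00:00:00 GMT'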
def parse_http_date(date):
    """
    Parses a date format as specified by HTTP RFC2616 section 3.3.1.

    The three formats allowed by the RFC are accepted, even if only the first
    one is still in widespread use.

    Returns an integer expressed in seconds since the epoch, in UTC.
    """
    # email.utils.parsedate does the job for RFC1123 dates; unfortunately
    # RFC2616 makes it mandatory to support RFC850 dates too. So we roll
    # our own RFC-compliant parsing.
    for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
        m = regex.match(date)
        if m is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(m.group('year'))
        if year < 100:
            # Two-digit years (RFC850): 00-69 map to 2000-2069, 70-99 to 1970-1999.
            if year < 70:
                year += 2000
            else:
                year += 1900
        month = MONTHS.index(m.group('mon').lower()) + 1
        day = int(m.group('day'))
        hour = int(m.group('hour'))
        min = int(m.group('min'))
        sec = int(m.group('sec'))
        result = datetime.datetime(year, month, day, hour, min, sec)
        return calendar.timegm(result.utctimetuple())
    except Exception:
        six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])

def parse_http_date_safe(date):
    """
    Same as parse_http_date, but returns None if the input is invalid.
    """
    try:
        return parse_http_date(date)
    except Exception:
        pass
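# Illustrative calls (added, not part of the original file):
#   parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')  -> 784111777
#   parse_http_date_safe('not a date')                 -> None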
# Base 36 functions: useful for generating compact URLs

def base36_to_int(s):
    """
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
    input won't fit into an int.
    """
    # To prevent overconsumption of server resources, reject any
    # base36 string that is longer than 13 base36 digits (13 digits
    # is sufficient to base36-encode any 64-bit integer)
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    value = int(s, 36)
    # ... then do a final check that the value will fit into an int to avoid
    # returning a long (#15067). The long type was removed in Python 3.
    if not six.PY3 and value > sys.maxint:
        raise ValueError("Base36 input too large")
    return value

def int_to_base36(i):
    """
    Converts an integer to a base36 string
    """
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    factor = 0
    if i < 0:
        raise ValueError("Negative base36 conversion input.")
    if not six.PY3:
        if not isinstance(i, six.integer_types):
            raise TypeError("Non-integer base36 conversion input.")
        if i > sys.maxint:
            raise ValueError("Base36 conversion input too large.")
    # Find starting factor
    while True:
        factor += 1
        if i < 36 ** factor:
            factor -= 1
            break
    base36 = []
    # Construct base36 representation
    while factor >= 0:
        j = 36 ** factor
        base36.append(digits[i // j])
        i = i % j
        factor -= 1
    return ''.join(base36)
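# Illustrative round-trip (added, not part of the original file):
#   int_to_base36(1000)  -> 'rs'    # 27 * 36 + 28 = 1000
#   base36_to_int('rs')  -> 1000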
def urlsafe_base64_encode(s):
    """
    Encodes a bytestring in base64 for use in URLs, stripping any trailing
    equal signs.
    """
    return base64.urlsafe_b64encode(s).rstrip(b'\n=')

def urlsafe_base64_decode(s):
    """
    Decodes a base64 encoded string, adding back any trailing equal signs that
    might have been stripped.
    """
    s = s.encode('utf-8')  # base64encode should only return ASCII.
    try:
        return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
    except (LookupError, BinasciiError) as e:
        raise ValueError(e)
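# Illustrative round-trip (added, not part of the original file); the encoder
# takes bytes, the decoder takes text and re-pads before decoding:
#   urlsafe_base64_encode(b'1234')   -> b'MTIzNA'
#   urlsafe_base64_decode('MTIzNA')  -> b'1234'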
def parse_etags(etag_str):
    """
    Parses a string with one or several etags passed in If-None-Match and
    If-Match headers by the rules in RFC 2616. Returns a list of etags
    without surrounding double quotes (") and unescaped from \<CHAR>.
    """
    etags = ETAG_MATCH.findall(etag_str)
    if not etags:
        # etag_str has wrong format, treat it as an opaque string then
        return [etag_str]
    etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
    return etags

def quote_etag(etag):
    """
    Wraps a string in double quotes escaping contents as necessary.
    """
    return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
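# Illustrative behaviour (added, not part of the original file):
#   parse_etags('"etag1", W/"etag2"')  -> ['etag1', 'etag2']
#   quote_etag('etag1')                -> '"etag1"'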
def same_origin(url1, url2):
    """
    Checks if two URLs are 'same-origin'
    """
    p1, p2 = urllib_parse.urlparse(url1), urllib_parse.urlparse(url2)
    try:
        return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
    except ValueError:
        return False
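# Illustrative checks (added, not part of the original file):
#   same_origin('https://example.com/a/', 'https://example.com/b/')  -> True
#   same_origin('https://example.com/', 'http://example.com/')       -> False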
def is_safe_url(url, host=None):
    """
    Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
    a different host).

    Always returns ``False`` on an empty url.
    """
    if not url:
        return False
    netloc = urllib_parse.urlparse(url)[1]
    return not netloc or netloc == host
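# Illustrative checks (added, not part of the original file); 'example.com'
# is an assumed host value:
#   is_safe_url('/accounts/profile/', host='example.com')    -> True
#   is_safe_url('http://evil.example/', host='example.com')  -> False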