
/External.LCA_RESTRICTED/Languages/CPython/27/Lib/urlparse.py

http://github.com/IronLanguages/main
  1. """Parse (absolute and relative) URLs.
  2. urlparse module is based upon the following RFC specifications.
  3. RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
  4. and L. Masinter, January 2005.
  5. RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter
  6. and L.Masinter, December 1999.
  7. RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T.
  8. Berners-Lee, R. Fielding, and L. Masinter, August 1998.
  9. RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zwinski, July 1998.
  10. RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
  11. 1995.
  12. RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
  13. McCahill, December 1994
  14. RFC 3986 is considered the current standard and any future changes to
  15. urlparse module should conform with it. The urlparse module is
  16. currently not entirely compliant with this RFC due to defacto
  17. scenarios for parsing, and for backward compatibility purposes, some
  18. parsing quirks from older RFCs are retained. The testcases in
  19. test_urlparse.py provides a good indicator of parsing behavior.
  20. """
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
           "urlsplit", "urlunsplit", "parse_qs", "parse_qsl"]

# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
                 'wais', 'file', 'https', 'shttp', 'mms',
                 'prospero', 'rtsp', 'rtspu', '', 'sftp']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
               'imap', 'wais', 'file', 'mms', 'https', 'shttp',
               'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
               'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh']
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
                    'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
               'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
               'mms', '', 'sftp']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
              'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
                 'nntp', 'wais', 'https', 'shttp', 'snews',
                 'file', 'prospero', '']

# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789'
                '+-.')

MAX_CACHE_SIZE = 20
_parse_cache = {}

def clear_cache():
    """Clear the parse cache."""
    _parse_cache.clear()

class ResultMixin(object):
    """Shared methods for the parsed result objects."""

    @property
    def username(self):
        netloc = self.netloc
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                userinfo = userinfo.split(":", 1)[0]
            return userinfo
        return None

    @property
    def password(self):
        netloc = self.netloc
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                return userinfo.split(":", 1)[1]
        return None

    @property
    def hostname(self):
        netloc = self.netloc.split('@')[-1]
        if '[' in netloc and ']' in netloc:
            return netloc.split(']')[0][1:].lower()
        elif ':' in netloc:
            return netloc.split(':')[0].lower()
        elif netloc == '':
            return None
        else:
            return netloc.lower()

    @property
    def port(self):
        netloc = self.netloc.split('@')[-1].split(']')[-1]
        if ':' in netloc:
            port = netloc.split(':')[1]
            return int(port, 10)
        else:
            return None

from collections import namedtuple

class SplitResult(namedtuple('SplitResult', 'scheme netloc path query fragment'), ResultMixin):

    __slots__ = ()

    def geturl(self):
        return urlunsplit(self)


class ParseResult(namedtuple('ParseResult', 'scheme netloc path params query fragment'), ResultMixin):

    __slots__ = ()

    def geturl(self):
        return urlunparse(self)

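# Example usage (illustrative sketch; the URL values below are made up).  The
# ResultMixin properties pick the user info, host and port out of the netloc:
#
#     >>> r = SplitResult('http', 'user:pw@example.com:8080', '/index.html', '', '')
#     >>> r.hostname, r.port, r.username, r.password
#     ('example.com', 8080, 'user', 'pw')
#     >>> r.geturl()
#     'http://user:pw@example.com:8080/index.html'
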
def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    tuple = urlsplit(url, scheme, allow_fragments)
    scheme, netloc, url, query, fragment = tuple
    if scheme in uses_params and ';' in url:
        url, params = _splitparams(url)
    else:
        params = ''
    return ParseResult(scheme, netloc, url, params, query, fragment)
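# Example (illustrative; hypothetical URL):
#
#     >>> urlparse('http://netloc/path;params?query=arg#frag')
#     ParseResult(scheme='http', netloc='netloc', path='/path', params='params', query='query=arg', fragment='frag')
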
def _splitparams(url):
    if '/' in url:
        i = url.find(';', url.rfind('/'))
        if i < 0:
            return url, ''
    else:
        i = url.find(';')
    return url[:i], url[i+1:]

def _splitnetloc(url, start=0):
    delim = len(url)   # position of end of domain part of url, default is end
    for c in '/?#':    # look for delimiters; the order is NOT important
        wdelim = url.find(c, start)        # find first of this delim
        if wdelim >= 0:                    # if found
            delim = min(delim, wdelim)     # use earliest delim position
    return url[start:delim], url[delim:]   # return (domain, rest)

def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    allow_fragments = bool(allow_fragments)
    key = url, scheme, allow_fragments, type(url), type(scheme)
    cached = _parse_cache.get(key, None)
    if cached:
        return cached
    if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
        clear_cache()
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0:
        if url[:i] == 'http': # optimize the common case
            scheme = url[:i].lower()
            url = url[i+1:]
            if url[:2] == '//':
                netloc, url = _splitnetloc(url, 2)
                if (('[' in netloc and ']' not in netloc) or
                        (']' in netloc and '[' not in netloc)):
                    raise ValueError("Invalid IPv6 URL")
            if allow_fragments and '#' in url:
                url, fragment = url.split('#', 1)
            if '?' in url:
                url, query = url.split('?', 1)
            v = SplitResult(scheme, netloc, url, query, fragment)
            _parse_cache[key] = v
            return v
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            try:
                # make sure "url" is not actually a port number (in which case
                # "scheme" is really part of the path)
                _testportnum = int(url[i+1:])
            except ValueError:
                scheme, url = url[:i].lower(), url[i+1:]

    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
    if allow_fragments and scheme in uses_fragment and '#' in url:
        url, fragment = url.split('#', 1)
    if scheme in uses_query and '?' in url:
        url, query = url.split('?', 1)
    v = SplitResult(scheme, netloc, url, query, fragment)
    _parse_cache[key] = v
    return v
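# Example (illustrative; hypothetical URL).  Unlike urlparse(), urlsplit() does
# not split ';params' off the path:
#
#     >>> urlsplit('https://www.example.org:443/a/b;p?q=1#top')
#     SplitResult(scheme='https', netloc='www.example.org:443', path='/a/b;p', query='q=1', fragment='top')
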
def urlunparse(data):
    """Put a parsed URL back together again.  This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent)."""
    scheme, netloc, url, params, query, fragment = data
    if params:
        url = "%s;%s" % (url, params)
    return urlunsplit((scheme, netloc, url, query, fragment))

def urlunsplit(data):
    """Combine the elements of a tuple as returned by urlsplit() into a
    complete URL as a string. The data argument can be any five-item iterable.
    This may result in a slightly different, but equivalent URL, if the URL that
    was parsed originally had unnecessary delimiters (for example, a ? with an
    empty query; the RFC states that these are equivalent)."""
    scheme, netloc, url, query, fragment = data
    if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
        if url and url[:1] != '/': url = '/' + url
        url = '//' + (netloc or '') + url
    if scheme:
        url = scheme + ':' + url
    if query:
        url = url + '?' + query
    if fragment:
        url = url + '#' + fragment
    return url
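# Examples (illustrative; hypothetical URLs).  Redundant delimiters such as a
# '?' with an empty query do not survive a round trip:
#
#     >>> urlunsplit(('https', 'example.org', 'path', 'q=1', 'frag'))
#     'https://example.org/path?q=1#frag'
#     >>> urlunsplit(urlsplit('http://example.org/path?'))
#     'http://example.org/path'
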
def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    if not base:
        return url
    if not url:
        return base
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
            urlparse(base, '', allow_fragments)
    scheme, netloc, path, params, query, fragment = \
            urlparse(url, bscheme, allow_fragments)
    if scheme != bscheme or scheme not in uses_relative:
        return url
    if scheme in uses_netloc:
        if netloc:
            return urlunparse((scheme, netloc, path,
                               params, query, fragment))
        netloc = bnetloc
    if path[:1] == '/':
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    if not path and not params:
        path = bpath
        params = bparams
        if not query:
            query = bquery
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    segments = bpath.split('/')[:-1] + path.split('/')
    # XXX The stuff below is bogus in various ways...
    if segments[-1] == '.':
        segments[-1] = ''
    while '.' in segments:
        segments.remove('.')
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if (segments[i] == '..'
                and segments[i-1] not in ('', '..')):
                del segments[i-1:i+1]
                break
            i = i+1
        else:
            break
    if segments == ['', '..']:
        segments[-1] = ''
    elif len(segments) >= 2 and segments[-1] == '..':
        segments[-2:] = ['']
    return urlunparse((scheme, netloc, '/'.join(segments),
                       params, query, fragment))
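# Examples (illustrative; the RFC 3986 style base URL is hypothetical):
#
#     >>> urljoin('http://a/b/c/d;p?q', 'g')
#     'http://a/b/c/g'
#     >>> urljoin('http://a/b/c/d;p?q', '../g')
#     'http://a/b/g'
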
def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    if '#' in url:
        s, n, p, a, q, frag = urlparse(url)
        defrag = urlunparse((s, n, p, a, q, ''))
        return defrag, frag
    else:
        return url, ''
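# Example (illustrative; hypothetical URL):
#
#     >>> urldefrag('http://example.org/page.html#section2')
#     ('http://example.org/page.html', 'section2')
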
# unquote method for parse_qs and parse_qsl
# Cannot use directly from urllib as it would create a circular reference
# because urllib uses urlparse methods (urljoin).  If you update this function,
# update it also in urllib.  This code duplication does not exist in Python3.

_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a+b, chr(int(a+b, 16)))
                 for a in _hexdig for b in _hexdig)

def unquote(s):
    """unquote('abc%20def') -> 'abc def'."""
    res = s.split('%')
    # fastpath
    if len(res) == 1:
        return s
    s = res[0]
    for item in res[1:]:
        try:
            s += _hextochr[item[:2]] + item[2:]
        except KeyError:
            s += '%' + item
        except UnicodeDecodeError:
            s += unichr(int(item[:2], 16)) + item[2:]
    return s
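# Examples (illustrative).  Malformed escapes are passed through unchanged:
#
#     >>> unquote('abc%20def%3F')
#     'abc def?'
#     >>> unquote('100%')
#     '100%'
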
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.
        A true value indicates that blanks should be retained as
        blank strings.  The default false value indicates that
        blank values are to be ignored and treated as if they were
        not included.

    strict_parsing: flag indicating what to do with parsing errors.
        If false (the default), errors are silently ignored.
        If true, errors raise a ValueError exception.
    """
    dict = {}
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
        if name in dict:
            dict[name].append(value)
        else:
            dict[name] = [value]
    return dict
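# Example (illustrative; hypothetical query string).  Repeated names collect
# into a list; the items are sorted here only to make the output deterministic:
#
#     >>> sorted(parse_qs('a=1&a=2&b=cheese').items())
#     [('a', ['1', '2']), ('b', ['cheese'])]
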
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.  A
        true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors.  If
        false (the default), errors are silently ignored.  If true,
        errors raise a ValueError exception.

    Returns a list, as G-d intended.
    """
    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
    r = []
    for name_value in pairs:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError, "bad query field: %r" % (name_value,)
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            name = unquote(nv[0].replace('+', ' '))
            value = unquote(nv[1].replace('+', ' '))
            r.append((name, value))
    return r
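# Examples (illustrative; hypothetical query strings):
#
#     >>> parse_qsl('a=1&b=two+words&c=%2Fpath')
#     [('a', '1'), ('b', 'two words'), ('c', '/path')]
#     >>> parse_qsl('a=1&b=', keep_blank_values=True)
#     [('a', '1'), ('b', '')]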