
/django/utils/html.py

https://github.com/insane/django
Possible License(s): BSD-3-Clause
"""HTML utilities suitable for global use."""

from __future__ import unicode_literals

import re

try:
    from urllib.parse import quote, urlsplit, urlunsplit
except ImportError:  # Python 2
    from urllib import quote
    from urlparse import urlsplit, urlunsplit

from django.utils.safestring import SafeData, mark_safe
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import allow_lazy
from django.utils import six
from django.utils.text import normalize_newlines

from .html_parser import HTMLParser, HTMLParseError


# Configuration for urlize() function.
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)']
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;')]

# List of possible strings used for bullets in bulleted lists.
DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;']

unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
unquoted_percents_re = re.compile(r'%(?![0-9A-Fa-f]{2})')
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join([re.escape(x) for x in DOTS]), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z')


def escape(text):
    """
    Returns the given text with ampersands, quotes and angle brackets encoded
    for use in HTML.
    """
    return mark_safe(force_text(text).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;'))
escape = allow_lazy(escape, six.text_type)

_js_escapes = {
    ord('\\'): '\\u005C',
    ord('\''): '\\u0027',
    ord('"'): '\\u0022',
    ord('>'): '\\u003E',
    ord('<'): '\\u003C',
    ord('&'): '\\u0026',
    ord('='): '\\u003D',
    ord('-'): '\\u002D',
    ord(';'): '\\u003B',
    ord('\u2028'): '\\u2028',
    ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))


def escapejs(value):
    """Hex encodes characters for use in JavaScript strings."""
    return mark_safe(force_text(value).translate(_js_escapes))
escapejs = allow_lazy(escapejs, six.text_type)
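
# Illustrative usage (not part of the upstream module; approximate results
# sketched from the replacement tables above). escape() handles the five
# HTML-significant characters, escapejs() emits literal \uXXXX sequences:
#
#     escape('<a href="?x=1&y=2">')
#         -> '&lt;a href=&quot;?x=1&amp;y=2&quot;&gt;'   (marked safe)
#     escapejs('</script>')
#         -> '\u003C/script\u003E'   (literal backslash sequences, marked safe)
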

def conditional_escape(text):
    """
    Similar to escape(), except that it doesn't operate on pre-escaped strings.
    """
    if isinstance(text, SafeData):
        return text
    else:
        return escape(text)


def format_html(format_string, *args, **kwargs):
    """
    Similar to str.format, but passes all arguments through conditional_escape,
    and calls 'mark_safe' on the result. This function should be used instead
    of str.format or % interpolation to build up small HTML fragments.
    """
    args_safe = map(conditional_escape, args)
    kwargs_safe = dict([(k, conditional_escape(v)) for (k, v) in
                        six.iteritems(kwargs)])
    return mark_safe(format_string.format(*args_safe, **kwargs_safe))
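
# Illustrative usage (not part of the upstream module; a rough sketch of the
# escaping behaviour defined above). conditional_escape() leaves SafeData
# untouched, so format_html() escapes plain strings but trusts mark_safe'd ones:
#
#     format_html('<b>{0}</b> {link}', 'Tom & Jerry',
#                 link=mark_safe('<a href="/">home</a>'))
#         -> '<b>Tom &amp; Jerry</b> <a href="/">home</a>'   (marked safe)
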

def format_html_join(sep, format_string, args_generator):
    """
    A wrapper of format_html, for the common case of a group of arguments that
    need to be formatted using the same format string, and then joined using
    'sep'. 'sep' is also passed through conditional_escape.

    'args_generator' should be an iterator that returns the sequence of 'args'
    that will be passed to format_html.

    Example:

      format_html_join('\n', "<li>{0} {1}</li>", ((u.first_name, u.last_name)
                                                  for u in users))
    """
    return mark_safe(conditional_escape(sep).join(
        format_html(format_string, *tuple(args))
        for args in args_generator))


def linebreaks(value, autoescape=False):
    """Converts newlines into <p> and <br />s."""
    value = normalize_newlines(value)
    paras = re.split('\n{2,}', value)
    if autoescape:
        paras = ['<p>%s</p>' % escape(p).replace('\n', '<br />') for p in paras]
    else:
        paras = ['<p>%s</p>' % p.replace('\n', '<br />') for p in paras]
    return '\n\n'.join(paras)
linebreaks = allow_lazy(linebreaks, six.text_type)
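
# Illustrative usage (not part of the upstream module; approximate results).
# format_html_join() escapes each argument tuple; linebreaks() maps blank
# lines to paragraph breaks and single newlines to <br />:
#
#     format_html_join('\n', '<li>{0}</li>', (('a<b',), ('c',)))
#         -> '<li>a&lt;b</li>\n<li>c</li>'
#     linebreaks('Hello\nWorld\n\nBye')
#         -> '<p>Hello<br />World</p>\n\n<p>Bye</p>'
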

class MLStripper(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        self.reset()
        self.fed = []

    def handle_data(self, d):
        self.fed.append(d)

    def handle_entityref(self, name):
        self.fed.append('&%s;' % name)

    def handle_charref(self, name):
        self.fed.append('&#%s;' % name)

    def get_data(self):
        return ''.join(self.fed)


def strip_tags(value):
    """Returns the given HTML with all tags stripped."""
    s = MLStripper()
    try:
        s.feed(value)
        s.close()
    except HTMLParseError:
        return value
    else:
        return s.get_data()
strip_tags = allow_lazy(strip_tags)
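
# Illustrative usage (not part of the upstream module; a rough sketch).
# MLStripper keeps character data and re-emits entity/char references, so
# strip_tags() drops markup but preserves entities; on a parse error the
# input is returned unchanged:
#
#     strip_tags('<p>Hello &amp; <b>goodbye</b></p>')
#         -> 'Hello &amp; goodbye'
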

def remove_tags(html, tags):
    """Returns the given HTML with given tags removed."""
    tags = [re.escape(tag) for tag in tags.split()]
    tags_re = '(%s)' % '|'.join(tags)
    starttag_re = re.compile(r'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
    endtag_re = re.compile('</%s>' % tags_re)
    html = starttag_re.sub('', html)
    html = endtag_re.sub('', html)
    return html
remove_tags = allow_lazy(remove_tags, six.text_type)


def strip_spaces_between_tags(value):
    """Returns the given HTML with spaces between tags removed."""
    return re.sub(r'>\s+<', '><', force_text(value))
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, six.text_type)


def strip_entities(value):
    """Returns the given HTML with all entities (&something;) stripped."""
    return re.sub(r'&(?:\w+|#\d+);', '', force_text(value))
strip_entities = allow_lazy(strip_entities, six.text_type)


def fix_ampersands(value):
    """Returns the given HTML with all unencoded ampersands encoded correctly."""
    return unencoded_ampersands_re.sub('&amp;', force_text(value))
fix_ampersands = allow_lazy(fix_ampersands, six.text_type)
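
# Illustrative usage (not part of the upstream module; approximate results):
#
#     remove_tags('<b>Hi</b> <a href="/x">there</a>', 'b a')
#         -> 'Hi there'
#     strip_entities('1&nbsp;000')
#         -> '1000'
#     fix_ampersands('Tom & Jerry &amp; friends')
#         -> 'Tom &amp; Jerry &amp; friends'
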

def smart_urlquote(url):
    "Quotes a URL if it isn't already quoted."
    # Handle IDN before quoting.
    try:
        scheme, netloc, path, query, fragment = urlsplit(url)
        try:
            netloc = netloc.encode('idna').decode('ascii')  # IDN -> ACE
        except UnicodeError:  # invalid domain part
            pass
        else:
            url = urlunsplit((scheme, netloc, path, query, fragment))
    except ValueError:
        # invalid IPv6 URL (normally square brackets in hostname part).
        pass

    # A URL is considered unquoted if it contains no % characters or
    # contains a % not followed by two hexadecimal digits. See #9655.
    if '%' not in url or unquoted_percents_re.search(url):
        # See http://bugs.python.org/issue2637
        url = quote(force_bytes(url), safe=b'!*\'();:@&=+$,/?#[]~')

    return force_text(url)
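
# Illustrative usage (not part of the upstream module; a rough sketch assuming
# a UTF-8 text input). The domain is IDNA-encoded and, since the URL contains
# no '%' escapes yet, the rest is percent-quoted:
#
#     smart_urlquote('http://exämple.com/a b?q=ü')
#         -> 'http://xn--exmple-cua.com/a%20b?q=%C3%BC'
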

def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
    """
    Converts any URLs in text into clickable links.

    Works on http://, https://, www. links, and also on links ending in one of
    the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
    Links can have trailing punctuation (periods, commas, close-parens) and
    leading punctuation (opening parens) and it'll still do the right thing.

    If trim_url_limit is not None, the URLs in link text longer than this limit
    will be truncated to trim_url_limit-3 characters and appended with an
    ellipsis.

    If nofollow is True, the URLs in link text will get a rel="nofollow"
    attribute.

    If autoescape is True, the link text and URLs will get autoescaped.
    """
    def trim_url(x, limit=trim_url_limit):
        if limit is None or len(x) <= limit:
            return x
        return '%s...' % x[:max(0, limit - 3)]

    safe_input = isinstance(text, SafeData)
    words = word_split_re.split(force_text(text))
    for i, word in enumerate(words):
        match = None
        if '.' in word or '@' in word or ':' in word:
            # Deal with punctuation.
            lead, middle, trail = '', word, ''
            for punctuation in TRAILING_PUNCTUATION:
                if middle.endswith(punctuation):
                    middle = middle[:-len(punctuation)]
                    trail = punctuation + trail
            for opening, closing in WRAPPING_PUNCTUATION:
                if middle.startswith(opening):
                    middle = middle[len(opening):]
                    lead = lead + opening
                # Keep parentheses at the end only if they're balanced.
                if (middle.endswith(closing)
                        and middle.count(closing) == middle.count(opening) + 1):
                    middle = middle[:-len(closing)]
                    trail = closing + trail

            # Make URL we want to point to.
            url = None
            nofollow_attr = ' rel="nofollow"' if nofollow else ''
            if simple_url_re.match(middle):
                url = smart_urlquote(middle)
            elif simple_url_2_re.match(middle):
                url = smart_urlquote('http://%s' % middle)
            elif not ':' in middle and simple_email_re.match(middle):
                local, domain = middle.rsplit('@', 1)
                try:
                    domain = domain.encode('idna').decode('ascii')
                except UnicodeError:
                    continue
                url = 'mailto:%s@%s' % (local, domain)
                nofollow_attr = ''

            # Make link.
            if url:
                trimmed = trim_url(middle)
                if autoescape and not safe_input:
                    lead, trail = escape(lead), escape(trail)
                    url, trimmed = escape(url), escape(trimmed)
                middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
                words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
            else:
                if safe_input:
                    words[i] = mark_safe(word)
                elif autoescape:
                    words[i] = escape(word)
        elif safe_input:
            words[i] = mark_safe(word)
        elif autoescape:
            words[i] = escape(word)
    return ''.join(words)
urlize = allow_lazy(urlize, six.text_type)
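
# Illustrative usage (not part of the upstream module; a rough sketch). Bare
# www. hosts get an http:// prefix, e-mail addresses become mailto: links
# (never rel="nofollow"), and trailing punctuation is kept outside the link:
#
#     urlize('See www.example.com or mail foo@example.com.', nofollow=True)
#         -> 'See <a href="http://www.example.com" rel="nofollow">www.example.com</a>
#             or mail <a href="mailto:foo@example.com">foo@example.com</a>.'
#            (returned as a single string; wrapped here for readability)
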

def clean_html(text):
    """
    Clean the given HTML. Specifically, do the following:
        * Convert <b> and <i> to <strong> and <em>.
        * Encode all ampersands correctly.
        * Remove all "target" attributes from <a> tags.
        * Remove extraneous HTML, such as presentational tags that open and
          immediately close and <br clear="all">.
        * Convert hard-coded bullets into HTML unordered lists.
        * Remove stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the
          bottom of the text.
    """
    from django.utils.text import normalize_newlines
    text = normalize_newlines(force_text(text))
    text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text)
    text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text)
    text = fix_ampersands(text)
    # Remove all target="" attributes from <a> tags.
    text = link_target_attribute_re.sub('\\1', text)
    # Trim stupid HTML such as <br clear="all">.
    text = html_gunk_re.sub('', text)

    # Convert hard-coded bullets into HTML unordered lists.
    def replace_p_tags(match):
        s = match.group().replace('</p>', '</li>')
        for d in DOTS:
            s = s.replace('<p>%s' % d, '<li>')
        return '<ul>\n%s\n</ul>' % s
    text = hard_coded_bullets_re.sub(replace_p_tags, text)

    # Remove stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the bottom
    # of the text.
    text = trailing_empty_content_re.sub('', text)
    return text
clean_html = allow_lazy(clean_html, six.text_type)
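
# Illustrative usage (not part of the upstream module; approximate results):
#
#     clean_html('<b>Hi</b> & bye<br clear="all">')
#         -> '<strong>Hi</strong> &amp; bye'
#     clean_html('<p>* one</p><p>* two</p>')
#         -> '<ul>\n<li> one</li><li> two</li>\n</ul>'
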

def avoid_wrapping(value):
    """
    Avoid text wrapping in the middle of a phrase by adding non-breaking
    spaces where there previously were normal spaces.
    """
    return value.replace(" ", "\xa0")
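
# Illustrative usage (not part of the upstream module): each space is replaced
# with a U+00A0 no-break space, so the phrase stays on one line when rendered:
#
#     avoid_wrapping('10 MB')
#         -> '10\xa0MB'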