
/syntaxhighlight/pygments/lexers/templates.py

http://github.com/plushcms/PlushCMS
# -*- coding: utf-8 -*-
"""
    plushcms.syntaxhighlight.pygments.lexers.templates
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Lexers for various template engines' markup.

    :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re

from plushcms.syntaxhighlight.pygments.lexers.web import \
     PhpLexer, HtmlLexer, XmlLexer, JavascriptLexer, CssLexer
from plushcms.syntaxhighlight.pygments.lexers.agile import PythonLexer, \
     PerlLexer
from plushcms.syntaxhighlight.pygments.lexers.compiled import JavaLexer
from plushcms.syntaxhighlight.pygments.lexer import Lexer, DelegatingLexer, \
     RegexLexer, bygroups, include, using, this
from plushcms.syntaxhighlight.pygments.token import Error, Punctuation, \
     Text, Comment, Operator, Keyword, Name, String, Number, Other, Token
from plushcms.syntaxhighlight.pygments.util import html_doctype_matches, \
     looks_like_xml

__all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
           'JavascriptPhpLexer', 'ErbLexer', 'RhtmlLexer',
           'XmlErbLexer', 'CssErbLexer', 'JavascriptErbLexer',
           'SmartyLexer', 'HtmlSmartyLexer', 'XmlSmartyLexer',
           'CssSmartyLexer', 'JavascriptSmartyLexer', 'DjangoLexer',
           'HtmlDjangoLexer', 'CssDjangoLexer', 'XmlDjangoLexer',
           'JavascriptDjangoLexer', 'GenshiLexer', 'HtmlGenshiLexer',
           'GenshiTextLexer', 'CssGenshiLexer', 'JavascriptGenshiLexer',
           'MyghtyLexer', 'MyghtyHtmlLexer', 'MyghtyXmlLexer',
           'MyghtyCssLexer', 'MyghtyJavascriptLexer', 'MasonLexer', 'MakoLexer',
           'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer',
           'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
           'CheetahXmlLexer', 'CheetahJavascriptLexer',
           'EvoqueLexer', 'EvoqueHtmlLexer', 'EvoqueXmlLexer',
           'ColdfusionLexer', 'ColdfusionHtmlLexer',
           'VelocityLexer', 'VelocityHtmlLexer', 'VelocityXmlLexer',
           'SspLexer']


class ErbLexer(Lexer):
    """
    Generic `ERB <http://ruby-doc.org/core/classes/ERB.html>`_ (Ruby Templating)
    lexer.

    Just highlights ruby code between the preprocessor directives, other data
    is left untouched by the lexer.

    All options are also forwarded to the `RubyLexer`.
    """

    name = 'ERB'
    aliases = ['erb']
    mimetypes = ['application/x-ruby-templating']

    _block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M)

    def __init__(self, **options):
        from plushcms.syntaxhighlight.pygments.lexers.agile import RubyLexer
        self.ruby_lexer = RubyLexer(**options)
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """
        Since ERB doesn't allow "<%" and other tags inside of ruby
        blocks we have to use a split approach here that fails for
        that too.
        """
        tokens = self._block_re.split(text)
        tokens.reverse()
        state = idx = 0
        try:
            while True:
                # text
                if state == 0:
                    val = tokens.pop()
                    yield idx, Other, val
                    idx += len(val)
                    state = 1
                # block starts
                elif state == 1:
                    tag = tokens.pop()
                    # literals
                    if tag in ('<%%', '%%>'):
                        yield idx, Other, tag
                        idx += 3
                        state = 0
                    # comment
                    elif tag == '<%#':
                        yield idx, Comment.Preproc, tag
                        val = tokens.pop()
                        yield idx + 3, Comment, val
                        idx += 3 + len(val)
                        state = 2
                    # blocks or output
                    elif tag in ('<%', '<%=', '<%-'):
                        yield idx, Comment.Preproc, tag
                        idx += len(tag)
                        data = tokens.pop()
                        r_idx = 0
                        for r_idx, r_token, r_value in \
                            self.ruby_lexer.get_tokens_unprocessed(data):
                            yield r_idx + idx, r_token, r_value
                        idx += len(data)
                        state = 2
                    elif tag in ('%>', '-%>'):
                        yield idx, Error, tag
                        idx += len(tag)
                        state = 0
                    # % raw ruby statements
                    else:
                        yield idx, Comment.Preproc, tag[0]
                        r_idx = 0
                        for r_idx, r_token, r_value in \
                            self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
                            yield idx + 1 + r_idx, r_token, r_value
                        idx += len(tag)
                        state = 0
                # block ends
                elif state == 2:
                    tag = tokens.pop()
                    if tag not in ('%>', '-%>'):
                        yield idx, Other, tag
                    else:
                        yield idx, Comment.Preproc, tag
                    idx += len(tag)
                    state = 0
        except IndexError:
            return

    def analyse_text(text):
        if '<%' in text and '%>' in text:
            return 0.4
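

# Usage sketch for the lexers in this module (``highlight`` and
# ``HtmlFormatter`` are the standard Pygments entry points; the import
# paths below mirror this package's layout and are assumptions):
#
#     from plushcms.syntaxhighlight.pygments import highlight
#     from plushcms.syntaxhighlight.pygments.formatters import HtmlFormatter
#     print(highlight('<b><%= @user.name %></b>', ErbLexer(), HtmlFormatter()))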


class SmartyLexer(RegexLexer):
    """
    Generic `Smarty <http://smarty.php.net/>`_ template lexer.

    Just highlights smarty code between the preprocessor directives, other
    data is left untouched by the lexer.
    """

    name = 'Smarty'
    aliases = ['smarty']
    filenames = ['*.tpl']
    mimetypes = ['application/x-smarty']

    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            (r'[^{]+', Other),
            (r'(\{)(\*.*?\*)(\})',
             bygroups(Comment.Preproc, Comment, Comment.Preproc)),
            (r'(\{php\})(.*?)(\{/php\})',
             bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
                      Comment.Preproc)),
            (r'(\{)(/?[a-zA-Z_][a-zA-Z0-9_]*)(\s*)',
             bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
            (r'\{', Comment.Preproc, 'smarty')
        ],
        'smarty': [
            (r'\s+', Text),
            (r'\}', Comment.Preproc, '#pop'),
            (r'#[a-zA-Z_][a-zA-Z0-9_]*#', Name.Variable),
            (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\.[a-zA-Z0-9_]+)*', Name.Variable),
            (r'[~!%^&*()+=|\[\]:;,.<>/?{}@-]', Operator),
            # raw string so \b is a word boundary, not a backspace; the
            # exponent alternative matches e.g. "1e+10", not a literal "eE"
            (r'(true|false|null)\b', Keyword.Constant),
            (r"[0-9](\.[0-9]*)?([eE][+-]?[0-9]+)?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
  154. (r'"(\\\\|\\"|[^"])*"', String.Double),
  155. (r"'(\\\\|\\'|[^'])*'", String.Single),
  156. (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Attribute)
  157. ]
  158. }
  159. def analyse_text(text):
  160. rv = 0.0
  161. if re.search('\{if\s+.*?\}.*?\{/if\}', text):
  162. rv += 0.15
  163. if re.search('\{include\s+file=.*?\}', text):
  164. rv += 0.15
  165. if re.search('\{foreach\s+.*?\}.*?\{/foreach\}', text):
  166. rv += 0.15
  167. if re.search('\{\$.*?\}', text):
  168. rv += 0.01
  169. return rv
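

# How the analyse_text() scores above get used, as a sketch: guess_lexer()
# asks every lexer to rate the input and picks the highest score (the
# import path mirrors this package's layout and is an assumption):
#
#     from plushcms.syntaxhighlight.pygments.lexers import guess_lexer
#     lexer = guess_lexer('{foreach from=$items item=i}{$i}{/foreach}')
#     # the foreach and {$...} rules above score 0.15 + 0.01 for Smarty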


class VelocityLexer(RegexLexer):
    """
    Generic `Velocity <http://velocity.apache.org/>`_ template lexer.

    Just highlights velocity directives and variable references, other
    data is left untouched by the lexer.
    """

    name = 'Velocity'
    aliases = ['velocity']
    filenames = ['*.vm', '*.fhtml']

    flags = re.MULTILINE | re.DOTALL

    identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'

    tokens = {
        'root': [
            (r'[^{#$]+', Other),
            (r'(#)(\*.*?\*)(#)',
             bygroups(Comment.Preproc, Comment, Comment.Preproc)),
            (r'(##)(.*?$)',
             bygroups(Comment.Preproc, Comment)),
            (r'(#\{?)(' + identifier + r')(\}?)(\s?\()',
             bygroups(Comment.Preproc, Name.Function, Comment.Preproc,
                      Punctuation),
             'directiveparams'),
            (r'(#\{?)(' + identifier + r')(\}|\b)',
             bygroups(Comment.Preproc, Name.Function, Comment.Preproc)),
            (r'\$\{?', Punctuation, 'variable')
        ],
        'variable': [
            (identifier, Name.Variable),
            (r'\(', Punctuation, 'funcparams'),
            (r'(\.)(' + identifier + r')',
             bygroups(Punctuation, Name.Variable), '#push'),
            (r'\}', Punctuation, '#pop'),
            (r'', Other, '#pop')
        ],
        'directiveparams': [
            (r'(&&|\|\||==?|!=?|[-<>+*%&\|\^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b',
             Operator),
            (r'\[', Operator, 'rangeoperator'),
            (r'\b' + identifier + r'\b', Name.Function),
            include('funcparams')
        ],
        'rangeoperator': [
            (r'\.\.', Operator),
            include('funcparams'),
            (r'\]', Operator, '#pop')
        ],
        'funcparams': [
            (r'\$\{?', Punctuation, 'variable'),
            (r'\s+', Text),
            (r',', Punctuation),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r"0[xX][0-9a-fA-F]+[Ll]?", Number),
            (r"\b[0-9]+\b", Number),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'\(', Punctuation, '#push'),
            (r'\)', Punctuation, '#pop')
        ]
    }

    def analyse_text(text):
        rv = 0.0
        if re.search(r'#\{?macro\}?\(.*?\).*?#\{?end\}?', text):
            rv += 0.25
        if re.search(r'#\{?if\}?\(.+?\).*?#\{?end\}?', text):
            rv += 0.15
        if re.search(r'#\{?foreach\}?\(.+?\).*?#\{?end\}?', text):
            rv += 0.15
        if re.search(r'\$\{?[a-zA-Z_][a-zA-Z0-9_]*(\([^)]*\))?'
                     r'(\.[a-zA-Z0-9_]+(\([^)]*\))?)*\}?', text):
            rv += 0.01
        return rv
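

# Token-stream sketch for VelocityLexer (the sample template is made up):
#
#     code = '#if($user)Hello $user.name#end'
#     for index, token, value in VelocityLexer().get_tokens_unprocessed(code):
#         print(index, token, value)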


class VelocityHtmlLexer(DelegatingLexer):
    """
    Subclass of the `VelocityLexer` that highlights unlexed data
    with the `HtmlLexer`.
    """

    name = 'HTML+Velocity'
    aliases = ['html+velocity']
    alias_filenames = ['*.html', '*.fhtml']
    mimetypes = ['text/html+velocity']

    def __init__(self, **options):
        super(VelocityHtmlLexer, self).__init__(HtmlLexer, VelocityLexer,
                                                **options)


class VelocityXmlLexer(DelegatingLexer):
    """
    Subclass of the `VelocityLexer` that highlights unlexed data
    with the `XmlLexer`.
    """

    name = 'XML+Velocity'
    aliases = ['xml+velocity']
    alias_filenames = ['*.xml', '*.vm']
    mimetypes = ['application/xml+velocity']

    def __init__(self, **options):
        super(VelocityXmlLexer, self).__init__(XmlLexer, VelocityLexer,
                                               **options)

    def analyse_text(text):
        rv = VelocityLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.5
        return rv


class DjangoLexer(RegexLexer):
    """
    Generic `django <http://www.djangoproject.com/documentation/templates/>`_
    and `jinja <http://wsgiarea.pocoo.org/jinja/>`_ template lexer.

    It just highlights django/jinja code between the preprocessor directives,
    other data is left untouched by the lexer.
    """

    name = 'Django/Jinja'
    aliases = ['django', 'jinja']
    mimetypes = ['application/x-django-templating', 'application/x-jinja']

    flags = re.M | re.S

    tokens = {
        'root': [
            (r'[^{]+', Other),
            (r'\{\{', Comment.Preproc, 'var'),
            # jinja/django comments
            (r'\{[*#].*?[*#]\}', Comment),
            # django comments
            (r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Comment, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # raw jinja blocks
            (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Text, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # filter blocks
            (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
             'block'),
            (r'(\{%)(-?\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Comment.Preproc, Text, Keyword), 'block'),
            (r'\{', Other)
        ],
        'varnames': [
            (r'(\|)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Operator, Text, Name.Function)),
            (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Keyword, Text, Name.Function)),
            (r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo),
            (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
             r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b',
             Keyword),
            (r'(loop|block|super|forloop)\b', Name.Builtin),
            (r'[a-zA-Z][a-zA-Z0-9_-]*', Name.Variable),
            (r'\.[a-zA-Z0-9_]+', Name.Variable),
            (r':?"(\\\\|\\"|[^"])*"', String.Double),
            (r":?'(\\\\|\\'|[^'])*'", String.Single),
            (r'([{}()\[\]+\-*/,:~]|[><=]=?)', Operator),
  318. (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
  319. r"0[xX][0-9a-fA-F]+[Ll]?", Number),
        ],
        'var': [
            (r'\s+', Text),
            (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames')
        ],
        'block': [
            (r'\s+', Text),
            (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames'),
            (r'.', Punctuation)
        ]
    }

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\{%\s*(block|extends)', text) is not None:
            rv += 0.4
        if re.search(r'\{%\s*if\s*.*?%\}', text) is not None:
            rv += 0.1
        if re.search(r'\{\{.*?\}\}', text) is not None:
            rv += 0.1
        return rv
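

# Usage sketch for DjangoLexer (same assumed import paths as the ERB
# example above):
#
#     from plushcms.syntaxhighlight.pygments import highlight
#     from plushcms.syntaxhighlight.pygments.formatters import HtmlFormatter
#     src = '{% for item in items %}{{ item|title }}{% endfor %}'
#     print(highlight(src, DjangoLexer(), HtmlFormatter()))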


class MyghtyLexer(RegexLexer):
    """
    Generic `myghty templates`_ lexer. Code that isn't Myghty
    markup is yielded as `Token.Other`.

    *New in Pygments 0.6.*

    .. _myghty templates: http://www.myghty.org/
    """

    name = 'Myghty'
    aliases = ['myghty']
    filenames = ['*.myt', 'autodelegate']
    mimetypes = ['application/x-myghty']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(<%(def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
             bygroups(Name.Tag, None, Text, Name.Function, Name.Tag,
                      using(this), Name.Tag)),
            (r'(<%(\w+))(.*?)(>)(.*?)(</%\2\s*>)(?s)',
             bygroups(Name.Tag, None, Name.Function, Name.Tag,
                      using(PythonLexer), Name.Tag)),
            (r'(<&[^|])(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            (r'(<&\|)(.*?)(,.*?)?(&>)(?s)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            (r'</&>', Name.Tag),
            (r'(<%!?)(.*?)(%>)(?s)',
             bygroups(Name.Tag, using(PythonLexer), Name.Tag)),
            (r'(?<=^)#[^\n]*(\n|\Z)', Comment),
            (r'(?<=^)(%)([^\n]*)(\n|\Z)',
             bygroups(Name.Tag, using(PythonLexer), Other)),
            (r"""(?sx)
                 (.+?)               # anything, followed by:
                 (?:
                  (?<=\n)(?=[%#]) |  # an eval or comment line
                  (?=</?[%&]) |      # a substitution or block or
                                     # call start or end
                                     # - don't consume
                  (\\\n) |           # an escaped newline
                  \Z                 # end of string
                 )""", bygroups(Other, Operator)),
        ]
    }


class MyghtyHtmlLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `HtmlLexer`.

    *New in Pygments 0.6.*
    """

    name = 'HTML+Myghty'
    aliases = ['html+myghty']
    mimetypes = ['text/html+myghty']

    def __init__(self, **options):
        super(MyghtyHtmlLexer, self).__init__(HtmlLexer, MyghtyLexer,
                                              **options)


class MyghtyXmlLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `XmlLexer`.

    *New in Pygments 0.6.*
    """

    name = 'XML+Myghty'
    aliases = ['xml+myghty']
    mimetypes = ['application/xml+myghty']

    def __init__(self, **options):
        super(MyghtyXmlLexer, self).__init__(XmlLexer, MyghtyLexer,
                                             **options)


class MyghtyJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `JavascriptLexer`.

    *New in Pygments 0.6.*
    """

    name = 'JavaScript+Myghty'
    aliases = ['js+myghty', 'javascript+myghty']
    mimetypes = ['application/x-javascript+myghty',
                 'text/x-javascript+myghty',
                 'text/javascript+myghty']

    def __init__(self, **options):
        super(MyghtyJavascriptLexer, self).__init__(JavascriptLexer,
                                                    MyghtyLexer, **options)


class MyghtyCssLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `CssLexer`.

    *New in Pygments 0.6.*
    """

    name = 'CSS+Myghty'
    aliases = ['css+myghty']
    mimetypes = ['text/css+myghty']

    def __init__(self, **options):
        super(MyghtyCssLexer, self).__init__(CssLexer, MyghtyLexer,
                                             **options)


class MasonLexer(RegexLexer):
    """
    Generic `mason templates`_ lexer. Stolen from Myghty lexer. Code that isn't
    Mason markup is HTML.

    .. _mason templates: http://www.masonhq.com/

    *New in Pygments 1.4.*
    """

    name = 'Mason'
    aliases = ['mason']
    filenames = ['*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler']
    mimetypes = ['application/x-mason']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(<%doc>)(.*?)(</%doc>)(?s)',
             bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
            (r'(<%(def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
             bygroups(Name.Tag, None, Text, Name.Function, Name.Tag,
                      using(this), Name.Tag)),
            (r'(<%(\w+))(.*?)(>)(.*?)(</%\2\s*>)(?s)',
             bygroups(Name.Tag, None, Name.Function, Name.Tag,
                      using(PerlLexer), Name.Tag)),
            (r'(<&[^|])(.*?)(,.*?)?(&>)(?s)',
             bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
            (r'(<&\|)(.*?)(,.*?)?(&>)(?s)',
             bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
            (r'</&>', Name.Tag),
            (r'(<%!?)(.*?)(%>)(?s)',
             bygroups(Name.Tag, using(PerlLexer), Name.Tag)),
            (r'(?<=^)#[^\n]*(\n|\Z)', Comment),
            (r'(?<=^)(%)([^\n]*)(\n|\Z)',
             bygroups(Name.Tag, using(PerlLexer), Other)),
            (r"""(?sx)
                 (.+?)               # anything, followed by:
                 (?:
                  (?<=\n)(?=[%#]) |  # an eval or comment line
                  (?=</?[%&]) |      # a substitution or block or
                                     # call start or end
                                     # - don't consume
                  (\\\n) |           # an escaped newline
                  \Z                 # end of string
                 )""", bygroups(using(HtmlLexer), Operator)),
        ]
    }

    def analyse_text(text):
        rv = 0.0
        if re.search('<&', text) is not None:
            rv = 1.0
        return rv


class MakoLexer(RegexLexer):
    """
    Generic `mako templates`_ lexer. Code that isn't Mako
    markup is yielded as `Token.Other`.

    *New in Pygments 0.7.*

    .. _mako templates: http://www.makotemplates.org/
    """

    name = 'Mako'
    aliases = ['mako']
    filenames = ['*.mao']
    mimetypes = ['application/x-mako']

    tokens = {
        'root': [
            (r'(\s*)(%)(\s*end(?:\w+))(\n|\Z)',
             bygroups(Text, Comment.Preproc, Keyword, Other)),
            (r'(\s*)(%)([^\n]*)(\n|\Z)',
             bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
            (r'(\s*)(##[^\n]*)(\n|\Z)',
             bygroups(Text, Comment.Preproc, Other)),
            (r'(?s)<%doc>.*?</%doc>', Comment.Preproc),
            (r'(<%)([\w\.\:]+)',
             bygroups(Comment.Preproc, Name.Builtin), 'tag'),
            (r'(</%)([\w\.\:]+)(>)',
             bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
            (r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'),
            (r'(<%(?:!?))(.*?)(%>)(?s)',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'(\$\{)(.*?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'''(?sx)
                (.+?)                # anything, followed by:
                (?:
                 (?<=\n)(?=%|\#\#) | # an eval or comment line
                 (?=\#\*) |          # multiline comment
                 (?=</?%) |          # a python block
                                     # call start or end
                 (?=\$\{) |          # a substitution
                 (?<=\n)(?=\s*%) |
                                     # - don't consume
                 (\\\n) |            # an escaped newline
                 \Z                  # end of string
                )
            ''', bygroups(Other, Operator)),
            (r'\s+', Text),
        ],
        'ondeftags': [
            (r'<%', Comment.Preproc),
            (r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
            include('tag'),
        ],
        'tag': [
            (r'((?:\w+)\s*=)\s*(".*?")',
             bygroups(Name.Attribute, String)),
            (r'/?\s*>', Comment.Preproc, '#pop'),
            (r'\s+', Text),
        ],
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
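

# Token-stream sketch for MakoLexer (the template string is made up):
#
#     code = '<%def name="greet(name)">Hello ${name}!</%def>'
#     for index, token, value in MakoLexer().get_tokens_unprocessed(code):
#         print(index, token, value)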


class MakoHtmlLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `HtmlLexer`.

    *New in Pygments 0.7.*
    """

    name = 'HTML+Mako'
    aliases = ['html+mako']
    mimetypes = ['text/html+mako']

    def __init__(self, **options):
        super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
                                            **options)


class MakoXmlLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `XmlLexer`.

    *New in Pygments 0.7.*
    """

    name = 'XML+Mako'
    aliases = ['xml+mako']
    mimetypes = ['application/xml+mako']

    def __init__(self, **options):
        super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
                                           **options)


class MakoJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `JavascriptLexer`.

    *New in Pygments 0.7.*
    """

    name = 'JavaScript+Mako'
    aliases = ['js+mako', 'javascript+mako']
    mimetypes = ['application/x-javascript+mako',
                 'text/x-javascript+mako',
                 'text/javascript+mako']

    def __init__(self, **options):
        super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
                                                  MakoLexer, **options)


class MakoCssLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `CssLexer`.

    *New in Pygments 0.7.*
    """

    name = 'CSS+Mako'
    aliases = ['css+mako']
    mimetypes = ['text/css+mako']

    def __init__(self, **options):
        super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
                                           **options)


# Genshi and Cheetah lexers courtesy of Matt Good.

class CheetahPythonLexer(Lexer):
    """
    Lexer for handling Cheetah's special $ tokens in Python syntax.
    """

    def get_tokens_unprocessed(self, text):
        pylexer = PythonLexer(**self.options)
        for pos, type_, value in pylexer.get_tokens_unprocessed(text):
            if type_ == Token.Error and value == '$':
                type_ = Comment.Preproc
            yield pos, type_, value
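

# What the wrapper above does, as a sketch: PythonLexer emits "$" as
# Token.Error, and CheetahPythonLexer relabels exactly those tokens:
#
#     for pos, type_, value in \
#             CheetahPythonLexer().get_tokens_unprocessed('$name.upper()'):
#         print(pos, type_, value)   # the "$" comes out as Comment.Preproc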


class CheetahLexer(RegexLexer):
    """
    Generic `cheetah templates`_ lexer. Code that isn't Cheetah
    markup is yielded as `Token.Other`. This also works for
    `spitfire templates`_ which use the same syntax.

    .. _cheetah templates: http://www.cheetahtemplate.org/
    .. _spitfire templates: http://code.google.com/p/spitfire/
    """

    name = 'Cheetah'
    aliases = ['cheetah', 'spitfire']
    filenames = ['*.tmpl', '*.spt']
    mimetypes = ['application/x-cheetah', 'application/x-spitfire']

    tokens = {
        'root': [
            (r'(##[^\n]*)$',
             (bygroups(Comment))),
            (r'#[*](.|\n)*?[*]#', Comment),
            (r'#end[^#\n]*(?:#|$)', Comment.Preproc),
            (r'#slurp$', Comment.Preproc),
            (r'(#[a-zA-Z]+)([^#\n]*)(#|$)',
             (bygroups(Comment.Preproc, using(CheetahPythonLexer),
                       Comment.Preproc))),
            # TODO support other Python syntax like $foo['bar']
            (r'(\$)([a-zA-Z_][a-zA-Z0-9_\.]*[a-zA-Z0-9_])',
             bygroups(Comment.Preproc, using(CheetahPythonLexer))),
            (r'(\$\{!?)(.*?)(\})(?s)',
             bygroups(Comment.Preproc, using(CheetahPythonLexer),
                      Comment.Preproc)),
            (r'''(?sx)
                (.+?)                # anything, followed by:
                (?:
                 (?=[#][#a-zA-Z]*) | # an eval comment
                 (?=\$[a-zA-Z_{]) |  # a substitution
                 \Z                  # end of string
                )
            ''', Other),
            (r'\s+', Text),
        ],
    }


class CheetahHtmlLexer(DelegatingLexer):
    """
    Subclass of the `CheetahLexer` that highlights unlexed data
    with the `HtmlLexer`.
    """

    name = 'HTML+Cheetah'
    aliases = ['html+cheetah', 'html+spitfire']
    mimetypes = ['text/html+cheetah', 'text/html+spitfire']

    def __init__(self, **options):
        super(CheetahHtmlLexer, self).__init__(HtmlLexer, CheetahLexer,
                                               **options)


class CheetahXmlLexer(DelegatingLexer):
    """
    Subclass of the `CheetahLexer` that highlights unlexed data
    with the `XmlLexer`.
    """

    name = 'XML+Cheetah'
    aliases = ['xml+cheetah', 'xml+spitfire']
    mimetypes = ['application/xml+cheetah', 'application/xml+spitfire']

    def __init__(self, **options):
        super(CheetahXmlLexer, self).__init__(XmlLexer, CheetahLexer,
                                              **options)


class CheetahJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `CheetahLexer` that highlights unlexed data
    with the `JavascriptLexer`.
    """

    name = 'JavaScript+Cheetah'
    aliases = ['js+cheetah', 'javascript+cheetah',
               'js+spitfire', 'javascript+spitfire']
    mimetypes = ['application/x-javascript+cheetah',
                 'text/x-javascript+cheetah',
                 'text/javascript+cheetah',
                 'application/x-javascript+spitfire',
                 'text/x-javascript+spitfire',
                 'text/javascript+spitfire']

    def __init__(self, **options):
        super(CheetahJavascriptLexer, self).__init__(JavascriptLexer,
                                                     CheetahLexer, **options)


class GenshiTextLexer(RegexLexer):
    """
    A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ text
    templates.
    """

    name = 'Genshi Text'
    aliases = ['genshitext']
    mimetypes = ['application/x-genshi-text', 'text/x-genshi']

    tokens = {
        'root': [
            (r'[^#\$\s]+', Other),
            (r'^(\s*)(##.*)$', bygroups(Text, Comment)),
            (r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'),
            include('variable'),
            (r'[#\$\s]', Other),
        ],
        'directive': [
            (r'\n', Text, '#pop'),
            (r'(?:def|for|if)\s+.*', using(PythonLexer), '#pop'),
            (r'(choose|when|with)([^\S\n]+)(.*)',
             bygroups(Keyword, Text, using(PythonLexer)), '#pop'),
            (r'(choose|otherwise)\b', Keyword, '#pop'),
            (r'(end\w*)([^\S\n]*)(.*)', bygroups(Keyword, Text, Comment),
             '#pop'),
        ],
        'variable': [
            (r'(?<!\$)(\$\{)(.+?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'(?<!\$)(\$)([a-zA-Z_][a-zA-Z0-9_\.]*)',
             Name.Variable),
        ]
    }


class GenshiMarkupLexer(RegexLexer):
    """
    Base lexer for Genshi markup, used by `HtmlGenshiLexer` and
    `GenshiLexer`.
    """

    flags = re.DOTALL

    tokens = {
        'root': [
            (r'[^<\$]+', Other),
            (r'(<\?python)(.*?)(\?>)',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # yield style and script blocks as Other
            (r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other),
            (r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
            (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
            include('variable'),
            (r'[<\$]', Other),
        ],
        'pytag': [
            (r'\s+', Text),
            (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'pyattr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'pyattr': [
            ('(")(.*?)(")', bygroups(String, using(PythonLexer), String),
             '#pop'),
            ("(')(.*?)(')", bygroups(String, using(PythonLexer), String),
             '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
        'tag': [
            (r'\s+', Text),
            (r'py:[a-zA-Z0-9_-]+\s*=', Name.Attribute, 'pyattr'),
            (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('"', String, 'attr-dstring'),
            ("'", String, 'attr-sstring'),
            (r'[^\s>]*', String, '#pop')
        ],
        'attr-dstring': [
            ('"', String, '#pop'),
            include('strings'),
            ("'", String)
        ],
        'attr-sstring': [
            ("'", String, '#pop'),
            include('strings'),
            ("'", String)
        ],
        'strings': [
            ('[^"\'$]+', String),
            include('variable')
        ],
        'variable': [
            (r'(?<!\$)(\$\{)(.+?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'(?<!\$)(\$)([a-zA-Z_][a-zA-Z0-9_\.]*)',
             Name.Variable),
        ]
    }


class HtmlGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ HTML templates.
    """

    name = 'HTML+Genshi'
    aliases = ['html+genshi', 'html+kid']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+genshi']

    def __init__(self, **options):
        super(HtmlGenshiLexer, self).__init__(HtmlLexer, GenshiMarkupLexer,
                                              **options)

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\$\{.*?\}', text) is not None:
            rv += 0.2
        if re.search(r'py:(.*?)=["\']', text) is not None:
            rv += 0.2
        return rv + HtmlLexer.analyse_text(text) - 0.01


class GenshiLexer(DelegatingLexer):
    """
    A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ XML templates.
    """

    name = 'Genshi'
    aliases = ['genshi', 'kid', 'xml+genshi', 'xml+kid']
    filenames = ['*.kid']
    alias_filenames = ['*.xml']
    mimetypes = ['application/x-genshi', 'application/x-kid']

    def __init__(self, **options):
        super(GenshiLexer, self).__init__(XmlLexer, GenshiMarkupLexer,
                                          **options)

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\$\{.*?\}', text) is not None:
            rv += 0.2
        if re.search(r'py:(.*?)=["\']', text) is not None:
            rv += 0.2
        return rv + XmlLexer.analyse_text(text) - 0.01


class JavascriptGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights javascript code in genshi text templates.
    """

    name = 'JavaScript+Genshi Text'
    aliases = ['js+genshitext', 'js+genshi', 'javascript+genshitext',
               'javascript+genshi']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+genshi',
                 'text/x-javascript+genshi',
                 'text/javascript+genshi']

    def __init__(self, **options):
        super(JavascriptGenshiLexer, self).__init__(JavascriptLexer,
                                                    GenshiTextLexer,
                                                    **options)

    def analyse_text(text):
        return GenshiLexer.analyse_text(text) - 0.05


class CssGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights CSS definitions in genshi text templates.
    """

    name = 'CSS+Genshi Text'
    aliases = ['css+genshitext', 'css+genshi']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+genshi']

    def __init__(self, **options):
        super(CssGenshiLexer, self).__init__(CssLexer, GenshiTextLexer,
                                             **options)

    def analyse_text(text):
        return GenshiLexer.analyse_text(text) - 0.05


class RhtmlLexer(DelegatingLexer):
    """
    Subclass of the ERB lexer that highlights the unlexed data with the
    html lexer.

    Nested Javascript and CSS is highlighted too.
    """

    name = 'RHTML'
    aliases = ['rhtml', 'html+erb', 'html+ruby']
    filenames = ['*.rhtml']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+ruby']

    def __init__(self, **options):
        super(RhtmlLexer, self).__init__(HtmlLexer, ErbLexer, **options)

    def analyse_text(text):
        rv = ErbLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            # one more than the XmlErbLexer returns
            rv += 0.5
        return rv


class XmlErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights data outside preprocessor
    directives with the `XmlLexer`.
    """

    name = 'XML+Ruby'
    aliases = ['xml+erb', 'xml+ruby']
    alias_filenames = ['*.xml']
    mimetypes = ['application/xml+ruby']

    def __init__(self, **options):
        super(XmlErbLexer, self).__init__(XmlLexer, ErbLexer, **options)

    def analyse_text(text):
        rv = ErbLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights unlexed data with the `CssLexer`.
    """

    name = 'CSS+Ruby'
    aliases = ['css+erb', 'css+ruby']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+ruby']

    def __init__(self, **options):
        super(CssErbLexer, self).__init__(CssLexer, ErbLexer, **options)

    def analyse_text(text):
        return ErbLexer.analyse_text(text) - 0.05


class JavascriptErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Ruby'
    aliases = ['js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+ruby',
                 'text/x-javascript+ruby',
                 'text/javascript+ruby']

    def __init__(self, **options):
        super(JavascriptErbLexer, self).__init__(JavascriptLexer, ErbLexer,
                                                 **options)

    def analyse_text(text):
        return ErbLexer.analyse_text(text) - 0.05


class HtmlPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` that highlights unhandled data with the `HtmlLexer`.

    Nested Javascript and CSS is highlighted too.
    """

    name = 'HTML+PHP'
    aliases = ['html+php']
    filenames = ['*.phtml']
    alias_filenames = ['*.php', '*.html', '*.htm', '*.xhtml',
                       '*.php[345]']
    mimetypes = ['application/x-php',
                 'application/x-httpd-php', 'application/x-httpd-php3',
                 'application/x-httpd-php4', 'application/x-httpd-php5']

    def __init__(self, **options):
        super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)

    def analyse_text(text):
        rv = PhpLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv


class XmlPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` that highlights unhandled data with the `XmlLexer`.
    """

    name = 'XML+PHP'
    aliases = ['xml+php']
    alias_filenames = ['*.xml', '*.php', '*.php[345]']
    mimetypes = ['application/xml+php']

    def __init__(self, **options):
        super(XmlPhpLexer, self).__init__(XmlLexer, PhpLexer, **options)

    def analyse_text(text):
        rv = PhpLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` which highlights unmatched data with the `CssLexer`.
    """

    name = 'CSS+PHP'
    aliases = ['css+php']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+php']

    def __init__(self, **options):
        super(CssPhpLexer, self).__init__(CssLexer, PhpLexer, **options)

    def analyse_text(text):
        return PhpLexer.analyse_text(text) - 0.05


class JavascriptPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` which highlights unmatched data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+PHP'
    aliases = ['js+php', 'javascript+php']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+php',
                 'text/x-javascript+php',
                 'text/javascript+php']

    def __init__(self, **options):
        super(JavascriptPhpLexer, self).__init__(JavascriptLexer, PhpLexer,
                                                 **options)

    def analyse_text(text):
        return PhpLexer.analyse_text(text)


class HtmlSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `HtmlLexer`.

    Nested Javascript and CSS is highlighted too.
    """

    name = 'HTML+Smarty'
    aliases = ['html+smarty']
    alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.tpl']
    mimetypes = ['text/html+smarty']

    def __init__(self, **options):
        super(HtmlSmartyLexer, self).__init__(HtmlLexer, SmartyLexer,
                                              **options)

    def analyse_text(text):
        rv = SmartyLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv


class XmlSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `XmlLexer`.
    """

    name = 'XML+Smarty'
    aliases = ['xml+smarty']
    alias_filenames = ['*.xml', '*.tpl']
    mimetypes = ['application/xml+smarty']

    def __init__(self, **options):
        super(XmlSmartyLexer, self).__init__(XmlLexer, SmartyLexer, **options)

    def analyse_text(text):
        rv = SmartyLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `CssLexer`.
    """

    name = 'CSS+Smarty'
    aliases = ['css+smarty']
    alias_filenames = ['*.css', '*.tpl']
    mimetypes = ['text/css+smarty']

    def __init__(self, **options):
        super(CssSmartyLexer, self).__init__(CssLexer, SmartyLexer, **options)

    def analyse_text(text):
        return SmartyLexer.analyse_text(text) - 0.05


class JavascriptSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Smarty'
    aliases = ['js+smarty', 'javascript+smarty']
    alias_filenames = ['*.js', '*.tpl']
    mimetypes = ['application/x-javascript+smarty',
                 'text/x-javascript+smarty',
                 'text/javascript+smarty']

    def __init__(self, **options):
        super(JavascriptSmartyLexer, self).__init__(JavascriptLexer,
                                                    SmartyLexer, **options)

    def analyse_text(text):
        return SmartyLexer.analyse_text(text) - 0.05


class HtmlDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `HtmlLexer`.

    Nested Javascript and CSS is highlighted too.
    """

    name = 'HTML+Django/Jinja'
    aliases = ['html+django', 'html+jinja']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+django', 'text/html+jinja']

    def __init__(self, **options):
        super(HtmlDjangoLexer, self).__init__(HtmlLexer, DjangoLexer,
                                              **options)

    def analyse_text(text):
        rv = DjangoLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv


class XmlDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `XmlLexer`.
    """

    name = 'XML+Django/Jinja'
    aliases = ['xml+django', 'xml+jinja']
    alias_filenames = ['*.xml']
    mimetypes = ['application/xml+django', 'application/xml+jinja']

    def __init__(self, **options):
        super(XmlDjangoLexer, self).__init__(XmlLexer, DjangoLexer, **options)

    def analyse_text(text):
        rv = DjangoLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `CssLexer`.
    """

    name = 'CSS+Django/Jinja'
    aliases = ['css+django', 'css+jinja']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+django', 'text/css+jinja']

    def __init__(self, **options):
        super(CssDjangoLexer, self).__init__(CssLexer, DjangoLexer, **options)

    def analyse_text(text):
        return DjangoLexer.analyse_text(text) - 0.05


class JavascriptDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Django/Jinja'
    aliases = ['js+django', 'javascript+django',
               'js+jinja', 'javascript+jinja']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+django',
                 'application/x-javascript+jinja',
                 'text/x-javascript+django',
                 'text/x-javascript+jinja',
                 'text/javascript+django',
                 'text/javascript+jinja']

    def __init__(self, **options):
        super(JavascriptDjangoLexer, self).__init__(JavascriptLexer,
                                                    DjangoLexer, **options)

    def analyse_text(text):
        return DjangoLexer.analyse_text(text) - 0.05


class JspRootLexer(RegexLexer):
    """
    Base for the `JspLexer`. Yields `Token.Other` for area outside of
    JSP tags.

    *New in Pygments 0.7.*
    """

    tokens = {
        'root': [
            (r'<%\S?', Keyword, 'sec'),
            # FIXME: I want to make these keywords but still parse attributes.
            (r'</?jsp:(forward|getProperty|include|plugin|setProperty|'
             r'useBean).*?>', Keyword),
            (r'[^<]+', Other),
            (r'<', Other),
        ],
        'sec': [
            (r'%>', Keyword, '#pop'),
            # note: '\w\W' != '.' without DOTALL.
            (r'[\w\W]+?(?=%>|\Z)', using(JavaLexer)),
        ],
    }


class JspLexer(DelegatingLexer):
    """
    Lexer for Java Server Pages.

    *New in Pygments 0.7.*
    """

    name = 'Java Server Page'
    aliases = ['jsp']
    filenames = ['*.jsp']
    mimetypes = ['application/x-jsp']

    def __init__(self, **options):
        super(JspLexer, self).__init__(XmlLexer, JspRootLexer, **options)

    def analyse_text(text):
        rv = JavaLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        if '<%' in text and '%>' in text:
            rv += 0.1
        return rv
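

# The delegation above means JspRootLexer hands <% ... %> sections to
# JavaLexer, and everything it yields as Other is re-lexed by XmlLexer.
# A usage sketch (the snippet is made up):
#
#     jsp = '<html><% int n = 3; out.println(n); %></html>'
#     print(list(JspLexer().get_tokens(jsp)))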


class EvoqueLexer(RegexLexer):
    """
    For files using the Evoque templating system.

    *New in Pygments 1.1.*
    """

    name = 'Evoque'
    aliases = ['evoque']
    filenames = ['*.evoque']
    mimetypes = ['application/x-evoque']

    flags = re.DOTALL

    tokens = {
        'root': [
            (r'[^#$]+', Other),
            (r'#\[', Comment.Multiline, 'comment'),
            (r'\$\$', Other),
            # svn keywords
            (r'\$\w+:[^$\n]*\$', Comment.Multiline),
            # directives: begin, end
            (r'(\$)(begin|end)(\{(%)?)(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      String, Punctuation, None)),
            # directives: evoque, overlay
            # see doc for handling first name arg: /directives/evoque/
            #+ minor inconsistency: the "name" in e.g. $overlay{name=site_base}
            # should be using(PythonLexer), not passed out as String
            (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
             r'(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      String, using(PythonLexer), Punctuation, None)),
            # directives: if, for, prefer, test
            (r'(\$)(\w+)(\{(%)?)(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      using(PythonLexer), Punctuation, None)),
            # directive clauses (no {} expression)
            (r'(\$)(else|rof|fi)', bygroups(Punctuation, Name.Builtin)),
            # expressions
            (r'(\$\{(%)?)(.*?)((!)(.*?))?((?(2)%)\})',
             bygroups(Punctuation, None, using(PythonLexer),
                      Name.Builtin, None, None, Punctuation, None)),
            (r'#', Other),
        ],
        'comment': [
            (r'[^\]#]', Comment.Multiline),
            (r'#\[', Comment.Multiline, '#push'),
            (r'\]#', Comment.Multiline, '#pop'),
            (r'[\]#]', Comment.Multiline)
        ],
    }


class EvoqueHtmlLexer(DelegatingLexer):
    """
    Subclass of the `EvoqueLexer` that highlights unlexed data with the
    `HtmlLexer`.

    *New in Pygments 1.1.*
    """

    name = 'HTML+Evoque'
    aliases = ['html+evoque']
    filenames = ['*.html']
    mimetypes = ['text/html+evoque']

    def __init__(self, **options):
        super(EvoqueHtmlLexer, self).__init__(HtmlLexer, EvoqueLexer,
                                              **options)


class EvoqueXmlLexer(DelegatingLexer):
    """
    Subclass of the `EvoqueLexer` that highlights unlexed data with the
    `XmlLexer`.

    *New in Pygments 1.1.*
    """

    name = 'XML+Evoque'
    aliases = ['xml+evoque']
    filenames = ['*.xml']
    mimetypes = ['application/xml+evoque']

    def __init__(self, **options):
        super(EvoqueXmlLexer, self).__init__(XmlLexer, EvoqueLexer,
                                             **options)


class ColdfusionLexer(RegexLexer):
    """
    Coldfusion statements
    """

    name = 'cfstatement'
    aliases = ['cfs']
    filenames = []
    mimetypes = []

    flags = re.IGNORECASE | re.MULTILINE

    tokens = {
        'root': [
            (r'//.*', Comment),
            (r'\+\+|--', Operator),
            (r'[-+*/^&=!]', Operator),
            (r'<=|>=|<|>', Operator),
            (r'mod\b', Operator),
            (r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator),
            (r'\|\||&&', Operator),
            (r'"', String.Double, 'string'),
            # There is a special rule for allowing html in single quoted
            # strings, evidently.
            (r"'.*?'", String.Single),
            (r'\d+', Number),
            (r'(if|else|len|var|case|default|break|switch)\b', Keyword),
            (r'([A-Za-z_$][A-Za-z0-9_.]*)\s*(\()',
             bygroups(Name.Function, Punctuation)),
            (r'[A-Za-z_$][A-Za-z0-9_.]*', Name.Variable),
            (r'[()\[\]{};:,.\\]', Punctuation),
            (r'\s+', Text),
        ],
        'string': [
            (r'""', String.Double),
            (r'#.+?#', String.Interp),
            (r'[^"#]+', String.Double),
            (r'#', String.Double),
            (r'"', String.Double, '#pop'),
        ],
    }


class ColdfusionMarkupLexer(RegexLexer):
    """
    Coldfusion markup only
    """

    name = 'Coldfusion'
    aliases = ['cf']
    filenames = []
    mimetypes = []

    tokens = {
        'root': [
            (r'[^<]+', Other),
            include('tags'),
            (r'<[^<>]*', Other),
        ],
        'tags': [
            (r'(?s)<!---.*?--->', Comment.Multiline),
            (r'(?s)<!--.*?-->', Comment),
            (r'<cfoutput.*?>', Name.Builtin, 'cfoutput'),
            (r'(?s)(<cfscript.*?>)(.+?)(</cfscript.*?>)',
             bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
            # negative lookbehind is for strings with embedded >
            (r'(?s)(</?cf(?:component|include|if|else|elseif|loop|return|'
             r'dbinfo|dump|abort|location|invoke|throw|file|savecontent|'
             r'mailpart|mail|header|content|zip|image|lock|argument|try|'
             r'catch|break|directory|http|set|function|param)\b)(.*?)((?<!\\)>)',
             bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
        ],
        'cfoutput': [
            (r'[^#<]+', Other),
            (r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer),
                                      Punctuation)),
            #(r'<cfoutput.*?>', Name.Builtin, '#push'),
            (r'</cfoutput.*?>', Name.Builtin, '#pop'),
            include('tags'),
            (r'(?s)<[^<>]*', Other),
            (r'#', Other),
        ],
    }


class ColdfusionHtmlLexer(DelegatingLexer):
    """
    Coldfusion markup in html
    """

    name = 'Coldfusion HTML'
    aliases = ['cfm']
    filenames = ['*.cfm', '*.cfml', '*.cfc']
    mimetypes = ['application/x-coldfusion']

    def __init__(self, **options):
        super(ColdfusionHtmlLexer, self).__init__(HtmlLexer,
                                                  ColdfusionMarkupLexer,
                                                  **options)


class SspLexer(DelegatingLexer):
    """
    Lexer for Scalate Server Pages.

    *New in Pygments 1.4.*
    """

    name = 'Scalate Server Page'
    aliases = ['ssp']
    filenames = ['*.ssp']
    mimetypes = ['application/x-ssp']

    def __init__(self, **options):
        super(SspLexer, self).__init__(XmlLexer, JspRootLexer, **options)

    def analyse_text(text):
        rv = 0.0
        if re.search(r'val \w+\s*:', text):
            rv += 0.6
        if looks_like_xml(text):
            rv += 0.2
        if '<%' in text and '%>' in text:
            rv += 0.1
        return rv
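

# Minimal smoke-test sketch (assumes the full package is importable; the
# sample inputs are made up):
#
#     if __name__ == '__main__':
#         for lexer, sample in [(ErbLexer(), '<%= @user.name %>'),
#                               (DjangoLexer(), '{{ user.name|title }}'),
#                               (SspLexer(), '<%@ val name: String %>')]:
#             print(lexer.name, list(lexer.get_tokens(sample))[:3])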