
/.config/sublime-text-2/Packages/SublimeCodeIntel/libs/textinfo.py

https://bitbucket.org/ecool/dotfiles
Python | 2000 lines | 1838 code | 29 blank | 133 comment | 117 complexity | 341968e1e11182fd2f1816f7166ba520 MD5
Possible License(s): MIT, BSD-3-Clause, MPL-2.0-no-copyleft-exception

Note: large files are truncated; this listing shows the first 1051 of 2000 lines.

  1. #!/usr/bin/env python
  2. # ***** BEGIN LICENSE BLOCK *****
  3. # Version: MPL 1.1/GPL 2.0/LGPL 2.1
  4. #
  5. # The contents of this file are subject to the Mozilla Public License
  6. # Version 1.1 (the "License"); you may not use this file except in
  7. # compliance with the License. You may obtain a copy of the License at
  8. # http://www.mozilla.org/MPL/
  9. #
  10. # Software distributed under the License is distributed on an "AS IS"
  11. # basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
  12. # License for the specific language governing rights and limitations
  13. # under the License.
  14. #
  15. # The Original Code is Komodo code.
  16. #
  17. # The Initial Developer of the Original Code is ActiveState Software Inc.
  18. # Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
  19. # ActiveState Software Inc. All Rights Reserved.
  20. #
  21. # Contributor(s):
  22. # ActiveState Software Inc
  23. #
  24. # Alternatively, the contents of this file may be used under the terms of
  25. # either the GNU General Public License Version 2 or later (the "GPL"), or
  26. # the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
  27. # in which case the provisions of the GPL or the LGPL are applicable instead
  28. # of those above. If you wish to allow use of your version of this file only
  29. # under the terms of either the GPL or the LGPL, and not to allow others to
  30. # use your version of this file under the terms of the MPL, indicate your
  31. # decision by deleting the provisions above and replace them with the notice
  32. # and other provisions required by the GPL or the LGPL. If you do not delete
  33. # the provisions above, a recipient may use your version of this file under
  34. # the terms of any one of the MPL, the GPL or the LGPL.
  35. #
  36. # ***** END LICENSE BLOCK *****
  37. r"""Determine information about text files.
  38. This module efficiently determines the encoding of text files (see
  39. _classify_encoding for details), accurately identifies binary files, and
  40. provides detailed meta information about text files.
  41. >>> import textinfo
  42. >>> path = __file__
  43. >>> if path.endswith(".pyc"): path = path[:-1]
  44. >>> ti = textinfo.textinfo_from_path(path)
  45. >>> ti.__class__
  46. <class 'textinfo.TextInfo'>
  47. >>> ti.encoding
  48. 'utf-8'
  49. >>> ti.file_type_name
  50. 'regular file'
  51. >>> ti.is_text
  52. True
  53. >>> ti.lang
  54. 'Python'
  55. >>> ti.langinfo
  56. <Python LangInfo>
  57. ...plus a number of other useful pieces of information gleaned from the
  58. file. To see a list of all the useful attributes, see:
  59. >> list(ti.as_dict().keys())
  60. ['encoding', 'file_type', ...]
  61. Note: This module requires at least Python 2.5 to use
  62. `codecs.lookup(<encname>).name`.
  63. """
  64. _cmdln_doc = """Determine information about text files.
  65. """
  66. # TODO:
  67. # - [high prio] prefs integration
  68. # - aggregate "is there an explicit encoding decl in this file" from XML, HTML,
  69. # lang-specific, emacs and vi vars decls (as discussed with Shane)
  70. # - fix ti with unicode paths on Windows (check on Linux too)
  71. # - '-L|--dereference' option a la `file` and `ls`
  72. # - See: http://webblaze.cs.berkeley.edu/2009/content-sniffing/
  73. # - Shift-JIS encoding is not detected for
  74. # http://public.activestate.com/pub/apc/perl-current/lib/Pod/Simple/t/corpus/s2763_sjis.txt
  75. # [Jan wrote]
  76. # > While the document isn't identified by filename extension as POD,
  77. # > it does contain POD and a corresponding =encoding directive.
  78. # Could potentially have a content heuristic check for POD.
  79. #
  80. # ----------------
  81. # Current Komodo (4.2) Encoding Determination Notes (used for reference,
  82. # but not absolutely followed):
  83. #
  84. # Working through koDocumentBase._detectEncoding:
  85. # encoding_name = pref:encodingDefault (on first start is set
  86. # to encoding from locale.getdefaultlocale() typically,
  87. # fallback to iso8859-1; default locale typically ends up being:
  88. # Windows: cp1252
  89. # Mac OS X: mac-roman
  90. # (modern) Linux: UTF-8)
  91. # encoding = the python name for this
  92. # tryencoding = pref:encoding (no default, explicitly set
  93. # encoding) -- i.e. if there are doc prefs for this
  94. # path, then give this encoding a try. If not given,
  95. # then utf-8 for XML/XSLT/VisualBasic and
  96. # pref:encodingDefault for others (though this is
  97. # all prefable via the 'languages' pref struct).
  98. # tryxmldecl
  99. # trymeta (HTML meta)
  100. # trymodeline
  101. # autodetect (whether to try at all)
  102. #
  103. # if autodetect or tryencoding:
  104. # koUnicodeEncoding.autoDetectEncoding()
  105. # else:
  106. # if encoding.startswith('utf'): # note this is pref:encodingDefault
  107. # check bom
  108. # presume encoding is right (give up if conversion fails)
  109. # else:
  110. # presume encoding is right (give up if conversion fails)
  111. #
  112. # Working through koUnicodeEncoding.autoDetectEncoding:
  113. # if tryxmldecl: ...
  114. # if tryhtmlmeta: ...
  115. # if trymodeline: ...
  116. # use bom: ...
  117. # ----------------
  118. __version_info__ = (0, 1, 0)
  119. __version__ = '.'.join(map(str, __version_info__))
  120. import os
  121. from os.path import join, dirname, abspath, basename, exists
  122. import sys
  123. import re
  124. from pprint import pprint
  125. import traceback
  126. import warnings
  127. import logging
  128. import optparse
  129. import codecs
  130. import locale
  131. import langinfo
  132. #---- exceptions and warnings
  133. class TextInfoError(Exception):
  134. pass
  135. class TextInfoConfigError(TextInfoError):
  136. pass
  137. class ChardetImportWarning(ImportWarning):
  138. pass
  139. warnings.simplefilter("once", ChardetImportWarning)
  140. #---- globals
  141. log = logging.getLogger("textinfo")
  142. # For debugging:
  143. DEBUG_CHARDET_INFO = False # gather chardet info
  144. #---- module API
  145. def textinfo_from_filename(path):
  146. """Determine test info for the given path **using the filename only**.
  147. No attempt is made to stat or read the file.
  148. """
  149. return TextInfo.init_from_filename(path)
  150. def textinfo_from_path(path, encoding=None, follow_symlinks=False,
  151. quick_determine_lang=False):
  152. """Determine text info for the given path.
  153. This raises EnvironmentError if the path doesn't exist or could
  154. not be read.
  155. """
  156. return TextInfo.init_from_path(path, encoding=encoding,
  157. follow_symlinks=follow_symlinks,
  158. quick_determine_lang=quick_determine_lang)
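# A minimal usage sketch for the two conveniences above. `demo_summarize`
# is a hypothetical helper (not part of the module API); it relies only on
# attributes documented elsewhere in this file.
def demo_summarize(paths):
    """Print a one-line textinfo summary per path, e.g.:
        textinfo.py: Python, utf-8
    """
    for path in paths:
        ti = textinfo_from_path(path, follow_symlinks=True)
        print(ti.as_summary())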
  159. #---- main TextInfo class
  160. class TextInfo(object):
  161. path = None
  162. file_type_name = None # e.g. "regular file", "directory", ...
  163. file_type = None # stat.S_IFMT(os.stat(path).st_mode)
  164. file_mode = None # stat.S_IMODE(os.stat(path).st_mode)
  165. is_text = None
  166. encoding = None
  167. has_bom = None # whether the text has a BOM (Byte Order Marker)
  168. encoding_bozo = False
  169. encoding_bozo_reasons = None
  170. lang = None # e.g. "Python", "Perl", ...
  171. langinfo = None # langinfo.LangInfo instance or None
  172. # Enable chardet-based heuristic guessing of encoding as a last
  173. # resort for file types known to not be binary.
  174. CHARDET_ENABLED = True
  175. CHARDET_THRESHHOLD = 0.9 # >=90% confidence to avoid false positives.
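# These two knobs are plain class attributes, so (sketch, hypothetical
# subclass name) a host app can tune them without patching this module:
#
#     class NoChardetTextInfo(TextInfo):
#         CHARDET_ENABLED = False  # skip step 12 of _classify_encoding()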
  176. @classmethod
  177. def init_from_filename(cls, path, lidb=None):
  178. """Create an instance using only the filename to initialize."""
  179. if lidb is None:
  180. lidb = langinfo.get_default_database()
  181. self = cls()
  182. self.path = path
  183. self._classify_from_filename(lidb)
  184. return self
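# Filename-only classification sketch, doctest-style (assumes the default
# langinfo database maps the ".py" extension to Python):
#
#     >>> ti = TextInfo.init_from_filename("setup.py")
#     >>> ti.lang
#     'Python'
#
# No stat or read happens here, so `encoding` et al. remain None.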
  185. @classmethod
  186. def init_from_path(cls, path, encoding=None, lidb=None,
  187. follow_symlinks=False,
  188. quick_determine_lang=False,
  189. env=None):
  190. """Create an instance using the filename and stat/read info
  191. from the given path to initialize.
  192. @param follow_symlinks {boolean} can be set to True to have
  193. the textinfo returned for a symlink be for the linked-to file. By
  194. default the textinfo is for the symlink itself.
  195. @param quick_determine_lang {boolean} can be set to True to have
  196. processing stop as soon as the language has been determined.
  197. Note that this means some fields will not be populated.
  198. @param env {runtime environment} A "runtime environment" class
  199. whose behaviour is used to influence processing. Currently
  200. it is just used to provide a hook for lang determination
  201. by filename (for Komodo).
  202. """
  203. if lidb is None:
  204. lidb = langinfo.get_default_database()
  205. self = cls()
  206. self.path = path
  207. self._accessor = PathAccessor(path, follow_symlinks=follow_symlinks)
  208. try:
  209. #TODO: pref: Is a preference specified for this path?
  210. self._classify_from_stat(lidb)
  211. if self.file_type_name != "regular file":
  212. # Don't continue if not a regular file.
  213. return self
  214. #TODO: add 'pref:treat_as_text' a la TextMate (or
  215. # perhaps that is handled in _classify_from_filename())
  216. self._classify_from_filename(lidb, env)
  217. if self.is_text is False:
  218. return self
  219. if self.lang and quick_determine_lang:
  220. return self
  221. if not self.lang:
  222. self._classify_from_magic(lidb)
  223. if self.is_text is False:
  224. return self
  225. if self.lang and quick_determine_lang:
  226. return self
  227. self._classify_encoding(lidb, suggested_encoding=encoding)
  228. if self.is_text is None and self.encoding:
  229. self.is_text = True
  230. if self.is_text is False:
  231. return self
  232. self.text = self._accessor.text
  233. if self.text: # No `self.text' with current UTF-32 hack.
  234. self._classify_from_content(lidb)
  235. return self
  236. finally:
  237. # Free the memory used by the accessor.
  238. del self._accessor
  239. def __repr__(self):
  240. if self.path:
  241. return "<TextInfo %r>" % self.path
  242. else:
  243. return "<TextInfo %r>"\
  244. % _one_line_summary_from_text(self.content, 30)
  245. def as_dict(self):
  246. return dict((k,v) for k,v in self.__dict__.items()
  247. if not k.startswith('_'))
  248. def as_summary(self):
  249. """One-liner string summary of text info."""
  250. d = self.as_dict()
  251. info = []
  252. if self.file_type_name and self.file_type_name != "regular file":
  253. info.append(self.file_type_name)
  254. else:
  255. info.append(self.lang or "???")
  256. if not self.is_text:
  257. info.append("binary")
  258. elif self.encoding:
  259. enc = self.encoding
  260. if self.has_bom:
  261. enc += " (bom)"
  262. info.append(enc)
  263. if DEBUG_CHARDET_INFO and hasattr(self, "chardet_info") \
  264. and self.chardet_info["encoding"]:
  265. info.append("chardet:%s/%.1f%%"
  266. % (self.chardet_info["encoding"],
  267. self.chardet_info["confidence"] * 100.0))
  268. return "%s: %s" % (self.path, ', '.join(info))
  269. def _classify_from_content(self, lidb):
  270. #TODO: Plan:
  271. # - eol_* attrs (test cases for this!)
  272. head = self.text[:self._accessor.HEAD_SIZE]
  273. tail = self.text[-self._accessor.TAIL_SIZE:]
  274. # If lang is unknown, attempt to guess from XML prolog or
  275. # shebang now that we've successfully decoded the buffer.
  276. if self.langinfo is None:
  277. (self.has_xml_prolog, xml_version,
  278. xml_encoding) = self._get_xml_prolog_info(head)
  279. if self.has_xml_prolog:
  280. self.xml_version = xml_version
  281. self.xml_encoding = xml_encoding
  282. self.langinfo = lidb.langinfo_from_lang("XML")
  283. self.lang = self.langinfo.name
  284. elif self.text.startswith("#!"):
  285. li = lidb.langinfo_from_magic(self.text, shebang_only=True)
  286. if li:
  287. self.langinfo = li
  288. self.lang = li.name
  289. # Extract Emacs local vars and Vi(m) modeline info and, if the
  290. # lang is still unknown, attempt to use them to determine it.
  291. self.emacs_vars = self._get_emacs_head_vars(head)
  292. self.emacs_vars.update(self._get_emacs_tail_vars(tail))
  293. self.vi_vars = self._get_vi_vars(head)
  294. if not self.vi_vars:
  295. self.vi_vars = self._get_vi_vars(tail)
  296. if self.langinfo is None and "mode" in self.emacs_vars:
  297. li = lidb.langinfo_from_emacs_mode(self.emacs_vars["mode"])
  298. if li:
  299. self.langinfo = li
  300. self.lang = li.name
  301. if self.langinfo is None and ("filetype" in self.vi_vars
  302. or "ft" in self.vi_vars):
  303. vi_filetype = self.vi_vars.get("filetype") or self.vi_vars.get("ft")
  304. li = lidb.langinfo_from_vi_filetype(vi_filetype)
  305. if li:
  306. self.langinfo = li
  307. self.lang = li.name
  308. if self.langinfo is not None:
  309. if self.langinfo.conforms_to("XML"):
  310. if not hasattr(self, "has_xml_prolog"):
  311. (self.has_xml_prolog, self.xml_version,
  312. self.xml_encoding) = self._get_xml_prolog_info(head)
  313. (self.has_doctype_decl, self.doctype_decl,
  314. self.doctype_name, self.doctype_public_id,
  315. self.doctype_system_id) = self._get_doctype_decl_info(head)
  316. # If this is just plain XML, we try to use the doctype
  317. # decl to choose a more specific XML lang.
  318. if self.lang == "XML" and self.has_doctype_decl:
  319. li = lidb.langinfo_from_doctype(
  320. public_id=self.doctype_public_id,
  321. system_id=self.doctype_system_id)
  322. if li and li.name != "XML":
  323. self.langinfo = li
  324. self.lang = li.name
  325. elif self.langinfo.conforms_to("HTML"):
  326. (self.has_doctype_decl, self.doctype_decl,
  327. self.doctype_name, self.doctype_public_id,
  328. self.doctype_system_id) = self._get_doctype_decl_info(head)
  329. # Allow promotion to XHTML (or other HTML flavours) based
  330. # on doctype.
  331. if self.lang == "HTML" and self.has_doctype_decl:
  332. li = lidb.langinfo_from_doctype(
  333. public_id=self.doctype_public_id,
  334. system_id=self.doctype_system_id)
  335. if li and li.name != "HTML":
  336. self.langinfo = li
  337. self.lang = li.name
  338. # Look for XML prolog and promote HTML -> XHTML if it
  339. # exists. Note that this wins over a plain HTML doctype.
  340. (self.has_xml_prolog, xml_version,
  341. xml_encoding) = self._get_xml_prolog_info(head)
  342. if self.has_xml_prolog:
  343. self.xml_version = xml_version
  344. self.xml_encoding = xml_encoding
  345. if self.lang == "HTML":
  346. li = lidb.langinfo_from_lang("XHTML")
  347. self.langinfo = li
  348. self.lang = li.name
  349. # Attempt to specialize the lang.
  350. if self.langinfo is not None:
  351. li = lidb.specialized_langinfo_from_content(self.langinfo, self.text)
  352. if li:
  353. self.langinfo = li
  354. self.lang = li.name
  355. def _classify_from_magic(self, lidb):
  356. """Attempt to classify from the file's magic number/shebang
  357. line, doctype, etc.
  358. Note that this is done before determining the encoding, so we are
  359. working with the *bytes*, not chars.
  360. """
  361. self.has_bom, bom, bom_encoding = self._get_bom_info()
  362. if self.has_bom:
  363. # If this file has a BOM then, unless something funny is
  364. # happening, this will be a text file encoded with
  365. # `bom_encoding`. We leave that to `_classify_encoding()`.
  366. return
  367. # Without a BOM we assume this is an 8-bit encoding, for the
  368. # purposes of looking at, e.g. a shebang line.
  369. #
  370. # UTF-16 and UTF-32 without a BOM is rare; we won't pick up on,
  371. # e.g. Python encoded as UCS-2 or UCS-4 here (but
  372. # `_classify_encoding()` should catch most of those cases).
  373. head_bytes = self._accessor.head_bytes
  374. li = lidb.langinfo_from_magic(head_bytes)
  375. if li:
  376. log.debug("lang from magic: %s", li.name)
  377. self.langinfo = li
  378. self.lang = li.name
  379. self.is_text = li.is_text
  380. return
  381. (has_doctype_decl, doctype_decl, doctype_name, doctype_public_id,
  382. doctype_system_id) = self._get_doctype_decl_info(head_bytes)
  383. if has_doctype_decl:
  384. li = lidb.langinfo_from_doctype(public_id=doctype_public_id,
  385. system_id=doctype_system_id)
  386. if li:
  387. log.debug("lang from doctype: %s", li.name)
  388. self.langinfo = li
  389. self.lang = li.name
  390. self.is_text = li.is_text
  391. return
  392. def _classify_encoding(self, lidb, suggested_encoding=None):
  393. """To classify from the content we need to separate text from
  394. binary, and figure out the encoding. This is an imperfect task.
  395. The algorithm here is to go through the following heroics to attempt
  396. to determine an encoding that works to decode the content. If all
  397. such attempts fail, we presume it is binary.
  398. 1. Use the BOM, if it has one.
  399. 2. Try the given suggested encoding (if any).
  400. 3. Check for EBCDIC encoding.
  401. 4. Lang-specific (if we know the lang already):
  402. * if this is Python, look for coding: decl and try that
  403. * if this is Perl, look for use encoding decl and try that
  404. * ...
  405. 5. XML: According to the XML spec the rule is the XML prolog
  406. specifies the encoding, or it is UTF-8.
  407. 6. HTML: Attempt to use Content-Type meta tag. Try the given
  408. charset, if any.
  409. 7. Emacs-style "coding" local var.
  410. 8. Vi[m]-style "fileencoding" local var.
  411. 9. Heuristic checks for UTF-16 without BOM.
  412. 10. Give UTF-8 a try, it is a pretty common fallback.
  413. We must do this before a possible 8-bit
  414. `locale.getpreferredencoding()` because any UTF-8 encoded
  415. document will decode with an 8-bit encoding (i.e. will decode,
  416. just with bogus characters).
  417. 11. Lang-specific fallback. E.g., UTF-8 for XML, ascii for Python.
  418. 12. chardet (http://chardet.feedparser.org/), if CHARDET_ENABLED == True
  419. 13. locale.getpreferredencoding()
  420. 14. iso8859-1 (in case `locale.getpreferredencoding()` is UTF-8
  421. we must have an 8-bit encoding attempt).
  422. TODO: Is there a worry about a lot of false positives for
  423. binary files?
  424. Notes:
  425. - A la Universal Feed Parser, if some
  426. supposed-to-be-authoritative encoding indicator is wrong (e.g.
  427. the BOM, the Python 'coding:' decl for Python),
  428. `self.encoding_bozo` is set True and a reason is appended to
  429. the `self.encoding_bozo_reasons` list.
  430. """
  431. # 1. Try the BOM.
  432. if self.has_bom is not False: # Was set in `_classify_from_magic()`.
  433. self.has_bom, bom, bom_encoding = self._get_bom_info()
  434. if self.has_bom:
  435. self._accessor.strip_bom(bom)
  436. # Python doesn't currently include a UTF-32 codec. For now
  437. # we'll *presume* that a UTF-32 BOM is correct. The
  438. # limitation is that `self.text' will NOT get set
  439. # because we cannot decode it.
  440. if bom_encoding in ("utf-32-le", "utf-32-be") \
  441. or self._accessor.decode(bom_encoding):
  442. log.debug("encoding: encoding from BOM: %r", bom_encoding)
  443. self.encoding = bom_encoding
  444. return
  445. else:
  446. log.debug("encoding: BOM encoding (%r) was *wrong*",
  447. bom_encoding)
  448. self._encoding_bozo(
  449. u"BOM encoding (%s) could not decode %s"
  450. % (bom_encoding, self._accessor))
  451. head_bytes = self._accessor.head_bytes
  452. if DEBUG_CHARDET_INFO:
  453. sys.path.insert(0, os.path.expanduser("~/tm/check/contrib/chardet"))
  454. import chardet
  455. del sys.path[0]
  456. self.chardet_info = chardet.detect(head_bytes)
  457. # 2. Try the suggested encoding.
  458. if suggested_encoding is not None:
  459. norm_suggested_encoding = _norm_encoding(suggested_encoding)
  460. if self._accessor.decode(suggested_encoding):
  461. self.encoding = norm_suggested_encoding
  462. return
  463. else:
  464. log.debug("encoding: suggested %r encoding didn't work for %s",
  465. suggested_encoding, self._accessor)
  466. # 3. Check for EBCDIC.
  467. #TODO: Not sure this should be included, chardet may be better
  468. # at this given different kinds of EBCDIC.
  469. EBCDIC_MAGIC = '\x4c\x6f\xa7\x94'
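# Those four bytes appear to be "<?xm" -- the start of an XML prolog -- in
# the common EBCDIC code pages (e.g. cp037); this is the classic EBCDIC
# sniff from Appendix F of the XML 1.0 spec.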
  470. if self._accessor.head_4_bytes == EBCDIC_MAGIC:
  471. # This is EBCDIC, but I don't know if there are multiple kinds
  472. # of EBCDIC. Python has a 'ebcdic-cp-us' codec. We'll use
  473. # that for now.
  474. norm_ebcdic_encoding = _norm_encoding("ebcdic-cp-us")
  475. if self._accessor.decode(norm_ebcdic_encoding):
  476. log.debug("EBCDIC encoding: %r", norm_ebcdic_encoding)
  477. self.encoding = norm_ebcdic_encoding
  478. return
  479. else:
  480. log.debug("EBCDIC encoding didn't work for %s",
  481. self._accessor)
  482. # 4. Lang-specific (if we know the lang already).
  483. if self.langinfo and self.langinfo.conformant_attr("encoding_decl_pattern"):
  484. m = self.langinfo.conformant_attr("encoding_decl_pattern") \
  485. .search(head_bytes)
  486. if m:
  487. lang_encoding = m.group("encoding")
  488. norm_lang_encoding = _norm_encoding(lang_encoding)
  489. if self._accessor.decode(norm_lang_encoding):
  490. log.debug("encoding: encoding from lang-spec: %r",
  491. norm_lang_encoding)
  492. self.encoding = norm_lang_encoding
  493. return
  494. else:
  495. log.debug("encoding: lang-spec encoding (%r) was *wrong*",
  496. lang_encoding)
  497. self._encoding_bozo(
  498. u"lang-spec encoding (%s) could not decode %s"
  499. % (lang_encoding, self._accessor))
  500. # 5. XML prolog
  501. if self.langinfo and self.langinfo.conforms_to("XML"):
  502. has_xml_prolog, xml_version, xml_encoding \
  503. = self._get_xml_prolog_info(head_bytes)
  504. if xml_encoding is not None:
  505. norm_xml_encoding = _norm_encoding(xml_encoding)
  506. if self._accessor.decode(norm_xml_encoding):
  507. log.debug("encoding: encoding from XML prolog: %r",
  508. norm_xml_encoding)
  509. self.encoding = norm_xml_encoding
  510. return
  511. else:
  512. log.debug("encoding: XML prolog encoding (%r) was *wrong*",
  513. norm_xml_encoding)
  514. self._encoding_bozo(
  515. u"XML prolog encoding (%s) could not decode %s"
  516. % (norm_xml_encoding, self._accessor))
  517. # 6. HTML: Attempt to use Content-Type meta tag.
  518. if self.langinfo and self.langinfo.conforms_to("HTML"):
  519. has_http_content_type_info, http_content_type, http_encoding \
  520. = self._get_http_content_type_info(head_bytes)
  521. if has_http_content_type_info and http_encoding:
  522. norm_http_encoding = _norm_encoding(http_encoding)
  523. if self._accessor.decode(norm_http_encoding):
  524. log.debug("encoding: encoding from HTTP content-type: %r",
  525. norm_http_encoding)
  526. self.encoding = norm_http_encoding
  527. return
  528. else:
  529. log.debug("encoding: HTTP content-type encoding (%r) was *wrong*",
  530. norm_http_encoding)
  531. self._encoding_bozo(
  532. u"HTML content-type encoding (%s) could not decode %s"
  533. % (norm_http_encoding, self._accessor))
  534. # 7. Emacs-style local vars.
  535. emacs_head_vars = self._get_emacs_head_vars(head_bytes)
  536. emacs_encoding = emacs_head_vars.get("coding")
  537. if not emacs_encoding:
  538. tail_bytes = self._accessor.tail_bytes
  539. emacs_tail_vars = self._get_emacs_tail_vars(tail_bytes)
  540. emacs_encoding = emacs_tail_vars.get("coding")
  541. if emacs_encoding:
  542. norm_emacs_encoding = _norm_encoding(emacs_encoding)
  543. if self._accessor.decode(norm_emacs_encoding):
  544. log.debug("encoding: encoding from Emacs coding var: %r",
  545. norm_emacs_encoding)
  546. self.encoding = norm_emacs_encoding
  547. return
  548. else:
  549. log.debug("encoding: Emacs coding var (%r) was *wrong*",
  550. norm_emacs_encoding)
  551. self._encoding_bozo(
  552. u"Emacs coding var (%s) could not decode %s"
  553. % (norm_emacs_encoding, self._accessor))
  554. # 8. Vi[m]-style local vars.
  555. vi_vars = self._get_vi_vars(head_bytes)
  556. vi_encoding = vi_vars.get("fileencoding") or vi_vars.get("fenc")
  557. if not vi_encoding:
  558. vi_vars = self._get_vi_vars(self._accessor.tail_bytes)
  559. vi_encoding = vi_vars.get("fileencoding") or vi_vars.get("fenc")
  560. if vi_encoding:
  561. norm_vi_encoding = _norm_encoding(vi_encoding)
  562. if self._accessor.decode(norm_vi_encoding):
  563. log.debug("encoding: encoding from Vi[m] coding var: %r",
  564. norm_vi_encoding)
  565. self.encoding = norm_vi_encoding
  566. return
  567. else:
  568. log.debug("encoding: Vi[m] coding var (%r) was *wrong*",
  569. norm_vi_encoding)
  570. self._encoding_bozo(
  571. u"Vi[m] coding var (%s) could not decode %s"
  572. % (norm_vi_encoding, self._accessor))
  573. # 9. Heuristic checks for UTF-16 without BOM.
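# Worked example: in UTF-16-LE, "<?xml" is the byte sequence
# '<\x00?\x00x\x00m\x00l\x00', so slicing [0::2] yields "<?xml" while
# [1::2] is all NULs; UTF-16-BE swaps the two streams. That is exactly
# what the odd/even-byte checks below test for.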
  574. utf16_encoding = None
  575. head_odd_bytes = head_bytes[0::2]
  576. head_even_bytes = head_bytes[1::2]
  577. head_markers = ["<?xml", "#!"]
  578. for head_marker in head_markers:
  579. length = len(head_marker)
  580. if head_odd_bytes.startswith(head_marker) \
  581. and head_even_bytes[0:length] == '\x00'*length:
  582. utf16_encoding = "utf-16-le"
  583. break
  584. elif head_even_bytes.startswith(head_marker) \
  585. and head_odd_bytes[0:length] == '\x00'*length:
  586. utf16_encoding = "utf-16-be"
  587. break
  588. internal_markers = ["coding"]
  589. for internal_marker in internal_markers:
  590. length = len(internal_marker)
  591. try:
  592. idx = head_odd_bytes.index(internal_marker)
  593. except ValueError:
  594. pass
  595. else:
  596. if head_even_bytes[idx:idx+length] == '\x00'*length:
  597. utf16_encoding = "utf-16-le"
  598. try:
  599. idx = head_even_bytes.index(internal_marker)
  600. except ValueError:
  601. pass
  602. else:
  603. if head_odd_bytes[idx:idx+length] == '\x00'*length:
  604. utf16_encoding = "utf-16-be"
  605. if utf16_encoding:
  606. if self._accessor.decode(utf16_encoding):
  607. log.debug("encoding: guessed encoding: %r", utf16_encoding)
  608. self.encoding = utf16_encoding
  609. return
  610. # 10. Give UTF-8 a try.
  611. norm_utf8_encoding = _norm_encoding("utf-8")
  612. if self._accessor.decode(norm_utf8_encoding):
  613. log.debug("UTF-8 encoding: %r", norm_utf8_encoding)
  614. self.encoding = norm_utf8_encoding
  615. return
  616. # 11. Lang-specific fallback (e.g. XML -> utf-8, Python -> ascii, ...).
  617. # Note: A potential problem here is that a fallback encoding here that
  618. # is a pre-Unicode Single-Byte encoding (like iso8859-1) always "works"
  619. # so the subsequent heuristics never get tried.
  620. fallback_encoding = None
  621. fallback_lang = None
  622. if self.langinfo:
  623. fallback_lang = self.langinfo.name
  624. fallback_encoding = self.langinfo.conformant_attr("default_encoding")
  625. if fallback_encoding:
  626. if self._accessor.decode(fallback_encoding):
  627. log.debug("encoding: fallback encoding for %s: %r",
  628. fallback_lang, fallback_encoding)
  629. self.encoding = fallback_encoding
  630. return
  631. else:
  632. log.debug("encoding: %s fallback encoding (%r) was *wrong*",
  633. fallback_lang, fallback_encoding)
  634. self._encoding_bozo(
  635. u"%s fallback encoding (%s) could not decode %s"
  636. % (fallback_lang, fallback_encoding, self._accessor))
  637. # 12. chardet (http://chardet.feedparser.org/)
  638. # Note: I'm leery of using this b/c (a) it's a sizeable perf
  639. # hit and (b) false positives -- for example, the first 8kB of
  640. # /usr/bin/php on Mac OS X 10.4.10 is ISO-8859-2 with 44%
  641. # confidence. :)
  642. # Solution: (a) Only allow for content we know is not binary
  643. # (from langinfo association); and (b) can be disabled via
  644. # CHARDET_ENABLED class attribute.
  645. if self.CHARDET_ENABLED and self.langinfo and self.langinfo.is_text:
  646. try:
  647. import chardet
  648. except ImportError:
  649. warnings.warn("no chardet module to aid in guessing encoding",
  650. ChardetImportWarning)
  651. else:
  652. chardet_info = chardet.detect(head_bytes)
  653. if chardet_info["encoding"] \
  654. and chardet_info["confidence"] > self.CHARDET_THRESHHOLD:
  655. chardet_encoding = chardet_info["encoding"]
  656. norm_chardet_encoding = _norm_encoding(chardet_encoding)
  657. if self._accessor.decode(norm_chardet_encoding):
  658. log.debug("chardet encoding: %r", chardet_encoding)
  659. self.encoding = norm_chardet_encoding
  660. return
  661. # 13. locale.getpreferredencoding()
  662. # Typical values for this:
  663. # Windows: cp1252 (aka windows-1252)
  664. # Mac OS X: mac-roman
  665. # Linux: UTF-8 (modern Linux anyway)
  666. # Solaris 8: 464 (aka ASCII)
  667. locale_encoding = locale.getpreferredencoding()
  668. if locale_encoding:
  669. norm_locale_encoding = _norm_encoding(locale_encoding)
  670. if self._accessor.decode(norm_locale_encoding):
  671. log.debug("encoding: locale preferred encoding: %r",
  672. locale_encoding)
  673. self.encoding = norm_locale_encoding
  674. return
  675. # 14. iso8859-1
  676. norm_fallback8bit_encoding = _norm_encoding("iso8859-1")
  677. if self._accessor.decode(norm_fallback8bit_encoding):
  678. log.debug("fallback 8-bit encoding: %r", norm_fallback8bit_encoding)
  679. self.encoding = norm_fallback8bit_encoding
  680. return
  681. # We couldn't find an encoding that works. Give up and presume
  682. # this is binary content.
  683. self.is_text = False
  684. def _encoding_bozo(self, reason):
  685. self.encoding_bozo = True
  686. if self.encoding_bozo_reasons is None:
  687. self.encoding_bozo_reasons = []
  688. self.encoding_bozo_reasons.append(reason)
  689. # c.f. http://www.xml.com/axml/target.html#NT-prolog
  690. _xml_prolog_pat = re.compile(
  691. r'''<\?xml
  692. ( # strict ordering is required but we'll be liberal here
  693. \s+version=['"](?P<ver>.*?)['"]
  694. | \s+encoding=['"](?P<enc>.*?)['"]
  695. )+
  696. .*? # other possible junk
  697. \s*\?>
  698. ''',
  699. re.VERBOSE | re.DOTALL
  700. )
  701. def _get_xml_prolog_info(self, head_bytes):
  702. """Parse out info from the '<?xml version=...' prolog, if any.
  703. Returns (<has-xml-prolog>, <xml-version>, <xml-encoding>). Examples:
  704. (False, None, None)
  705. (True, "1.0", None)
  706. (True, "1.0", "UTF-16")
  707. """
  708. # Presuming an 8-bit encoding. If it is UTF-16 or UTF-32, then
  709. # that should have been picked up by an earlier BOM check or via
  710. # the subsequent heuristic check for UTF-16 without a BOM.
  711. if not head_bytes.startswith("<?xml"):
  712. return (False, None, None)
  713. # Try to extract more info from the prolog.
  714. match = self._xml_prolog_pat.match(head_bytes)
  715. if not match:
  716. if log.isEnabledFor(logging.DEBUG):
  717. log.debug("`%s': could not match XML prolog: '%s'", self.path,
  718. _one_line_summary_from_text(head_bytes, 40))
  719. return (False, None, None)
  720. xml_version = match.group("ver")
  721. xml_encoding = match.group("enc")
  722. return (True, xml_version, xml_encoding)
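# Doctest-style sketch (mirrors the docstring examples above; the method
# only touches class-level regex state, so a bare instance suffices):
#
#     >>> TextInfo()._get_xml_prolog_info('<?xml version="1.0" encoding="UTF-8"?>')
#     (True, '1.0', 'UTF-8')
#     >>> TextInfo()._get_xml_prolog_info('no prolog here')
#     (False, None, None)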
  723. _html_meta_tag_pat = re.compile("""
  724. (<meta
  725. (?:\s+[\w-]+\s*=\s*(?:".*?"|'.*?'))+ # attributes
  726. \s*/?>)
  727. """,
  728. re.IGNORECASE | re.VERBOSE
  729. )
  730. _html_attr_pat = re.compile(
  731. # Currently requiring XML attrs (i.e. quoted value).
  732. '''(?:\s+([\w-]+)\s*=\s*(".*?"|'.*?'))'''
  733. )
  734. _http_content_type_splitter = re.compile(";\s*")
  735. def _get_http_content_type_info(self, head_bytes):
  736. """Returns info extracted from an HTML content-type meta tag if any.
  737. Returns (<has-http-content-type-info>, <content-type>, <charset>).
  738. For example:
  739. <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
  740. yields:
  741. (True, "text/html", "utf-8")
  742. """
  743. # Presuming an 8-bit encoding. If it is UTF-16 or UTF-32, then
  744. # that should have been picked up by an earlier BOM check.
  745. # Otherwise we rely on `chardet` to cover us.
  746. # Parse out '<meta ...>' tags, then the attributes in them.
  747. for meta_tag in self._html_meta_tag_pat.findall(head_bytes):
  748. meta = dict( (k.lower(), v[1:-1])
  749. for k,v in self._html_attr_pat.findall(meta_tag))
  750. if "http-equiv" in meta \
  751. and meta["http-equiv"].lower() == "content-type":
  752. content = meta.get("content", "")
  753. break
  754. else:
  755. return (False, None, None)
  756. # We found a http-equiv="Content-Type" tag, parse its content
  757. # attribute value.
  758. parts = [p.strip() for p in self._http_content_type_splitter.split(content)]
  759. if not parts:
  760. return (False, None, None)
  761. content_type = parts[0] or None
  762. for p in parts[1:]:
  763. if p.lower().startswith("charset="):
  764. charset = p[len("charset="):]
  765. if charset and charset[0] in ('"', "'"):
  766. charset = charset[1:]
  767. if charset and charset[-1] in ('"', "'"):
  768. charset = charset[:-1]
  769. break
  770. else:
  771. charset = None
  772. return (True, content_type, charset)
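# Doctest-style sketch, exercising the patterns above on the docstring's
# own example:
#
#     >>> TextInfo()._get_http_content_type_info(
#     ...     '<meta http-equiv="Content-Type" content="text/html; charset=utf-8">')
#     (True, 'text/html', 'utf-8')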
  773. #TODO: Note that this isn't going to catch the current HTML 5
  774. # doctype: '<!DOCTYPE html>'
  775. _doctype_decl_re = re.compile(r'''
  776. <!DOCTYPE
  777. \s+(?P<name>[a-zA-Z_:][\w:.-]*)
  778. \s+(?:
  779. SYSTEM\s+(["'])(?P<system_id_a>.*?)\2
  780. |
  781. PUBLIC
  782. \s+(["'])(?P<public_id_b>.*?)\4
  783. # HTML 3.2 and 2.0 doctypes don't include a system-id.
  784. (?:\s+(["'])(?P<system_id_b>.*?)\6)?
  785. )
  786. (\s*\[.*?\])?
  787. \s*>
  788. ''', re.IGNORECASE | re.DOTALL | re.UNICODE | re.VERBOSE)
  789. def _get_doctype_decl_info(self, head):
  790. """Parse out DOCTYPE info from the given XML or HTML content.
  791. Returns a tuple of the form:
  792. (<has-doctype-decl>, <doctype-decl>,
  793. <name>, <public-id>, <system-id>)
  794. The <public-id> is normalized as per this comment in the XML 1.0
  795. spec:
  796. Before a match is attempted, all strings of white space in the
  797. public identifier must be normalized to single space
  798. characters (#x20), and leading and trailing white space must
  799. be removed.
  800. Examples:
  801. (False, None, None, None, None)
  802. (True, '<!DOCTYPE greeting SYSTEM "hello.dtd">',
  803. 'greeting', None, 'hello.dtd'),
  804. (True,
  805. '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">',
  806. 'html',
  807. '-//W3C//DTD XHTML 1.0 Transitional//EN',
  808. 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd')
  809. Here is the spec for DOCTYPE decls in XML:
  810. http://www.xml.com/axml/target.html#NT-doctypedecl
  811. We loosely follow this to allow for some decls in HTML that isn't
  812. proper XML. As well, we are only parsing out decls that reference
  813. an external ID, as opposed to those that define entities locally.
  814. """
  815. if "<!DOCTYPE" not in head: # quick out
  816. return (False, None, None, None, None)
  817. m = self._doctype_decl_re.search(head)
  818. if not m:
  819. return (False, None, None, None, None)
  820. d = m.groupdict()
  821. name = d.get("name")
  822. system_id = d.get("system_id_a") or d.get("system_id_b")
  823. public_id = d.get("public_id_b")
  824. if public_id:
  825. public_id = re.sub("\s+", ' ', public_id.strip()) # normalize
  826. return (True, m.group(0), name, public_id, system_id)
  827. _emacs_vars_head_pat = re.compile("-\*-\s*(.*?)\s*-\*-")
  828. _emacs_head_vars_cache = None
  829. def _get_emacs_head_vars(self, head_bytes):
  830. """Return a dictionary of emacs-style local variables in the head.
  831. "Head" emacs vars on the ones in the '-*- ... -*-' one-liner.
  832. Parsing is done loosely according to this spec (and according to
  833. some in-practice deviations from this):
  834. http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
  835. """
  836. # Presuming an 8-bit encoding. If it is UTF-16 or UTF-32, then
  837. # that should have been picked up by an earlier BOM check.
  838. # Otherwise we rely on `chardet` to cover us.
  839. if self._emacs_head_vars_cache is not None:
  840. return self._emacs_head_vars_cache
  841. # Search the head for a '-*-'-style one-liner of variables.
  842. emacs_vars = {}
  843. if "-*-" in head_bytes:
  844. match = self._emacs_vars_head_pat.search(head_bytes)
  845. if match:
  846. emacs_vars_str = match.group(1)
  847. if '\n' in emacs_vars_str:
  848. raise ValueError("local variables error: -*- not "
  849. "terminated before end of line")
  850. emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
  851. if s.strip()]
  852. if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
  853. # While not in the spec, this form is allowed by emacs:
  854. # -*- Tcl -*-
  855. # where the implied "variable" is "mode". This form
  856. # is only allowed if there are no other variables.
  857. emacs_vars["mode"] = emacs_var_strs[0].strip()
  858. else:
  859. for emacs_var_str in emacs_var_strs:
  860. try:
  861. variable, value = emacs_var_str.strip().split(':', 1)
  862. except ValueError:
  863. log.debug("emacs variables error: malformed -*- "
  864. "line: %r", emacs_var_str)
  865. continue
  866. # Lowercase the variable name because Emacs allows "Mode"
  867. # or "mode" or "MoDe", etc.
  868. emacs_vars[variable.lower()] = value.strip()
  869. # Unquote values.
  870. for var, val in emacs_vars.items():
  871. if len(val) > 1 and (val.startswith('"') and val.endswith('"')
  872. or val.startswith("'") and val.endswith("'")):
  873. emacs_vars[var] = val[1:-1]
  874. self._emacs_head_vars_cache = emacs_vars
  875. return emacs_vars
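# Doctest-style sketch (a fresh instance, so the per-instance cache
# starts out empty):
#
#     >>> TextInfo()._get_emacs_head_vars('# -*- coding: utf-8 -*-')
#     {'coding': 'utf-8'}
#     >>> TextInfo()._get_emacs_head_vars('# -*- Tcl -*-')
#     {'mode': 'Tcl'}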
  876. # This regular expression is intended to match blocks like this:
  877. # PREFIX Local Variables: SUFFIX
  878. # PREFIX mode: Tcl SUFFIX
  879. # PREFIX End: SUFFIX
  880. # Some notes:
  881. # - "[ \t]" is used instead of "\s" to specifically exclude newlines
  882. # - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
  883. # not like anything other than Unix-style line terminators.
  884. _emacs_vars_tail_pat = re.compile(r"""^
  885. (?P<prefix>(?:[^\r\n|\n|\r])*?)
  886. [\ \t]*Local\ Variables:[\ \t]*
  887. (?P<suffix>.*?)(?:\r\n|\n|\r)
  888. (?P<content>.*?\1End:)
  889. """, re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
  890. _emacs_tail_vars_cache = None
  891. def _get_emacs_tail_vars(self, tail_bytes):
  892. r"""Return a dictionary of emacs-style local variables in the tail.
  893. "Tail" emacs vars on the ones in the multi-line "Local
  894. Variables:" block.
  895. >>> TextInfo()._get_emacs_tail_vars('# Local Variables:\n# foo: bar\n# End:')
  896. {'foo': 'bar'}
  897. >>> TextInfo()._get_emacs_tail_vars('# Local Variables:\n# foo: bar\\\n# baz\n# End:')
  898. {'foo': 'bar baz'}
  899. >>> TextInfo()._get_emacs_tail_vars('# Local Variables:\n# quoted: "bar "\n# End:')
  900. {'quoted': 'bar '}
  901. Parsing is done according to this spec (and according to some
  902. in-practice deviations from this):
  903. http://www.gnu.org/software/emacs/manual/html_chapter/emacs_33.html#SEC485
  904. """
  905. # Presuming an 8-bit encoding. If it is UTF-16 or UTF-32, then
  906. # that should have been picked up by an earlier BOM check.
  907. # Otherwise we rely on `chardet` to cover us.
  908. if self._emacs_tail_vars_cache is not None:
  909. return self._emacs_tail_vars_cache
  910. emacs_vars = {}
  911. if "Local Variables" not in tail_bytes:
  912. self._emacs_tail_vars_cache = emacs_vars
  913. return emacs_vars
  914. match = self._emacs_vars_tail_pat.search(tail_bytes)
  915. if match:
  916. prefix = match.group("prefix")
  917. suffix = match.group("suffix")
  918. lines = match.group("content").splitlines(0)
  919. #print "prefix=%r, suffix=%r, content=%r, lines: %s"\
  920. # % (prefix, suffix, match.group("content"), lines)
  921. # Validate the Local Variables block: proper prefix and suffix
  922. # usage.
  923. for i, line in enumerate(lines):
  924. if not line.startswith(prefix):
  925. log.debug("emacs variables error: line '%s' "
  926. "does not use proper prefix '%s'"
  927. % (line, prefix))
  928. return {}
  929. # Don't validate suffix on last line. Emacs doesn't care,
  930. # neither should we.
  931. if i != len(lines)-1 and not line.endswith(suffix):
  932. log.debug("emacs variables error: line '%s' "
  933. "does not use proper suffix '%s'"
  934. % (line, suffix))
  935. return {}
  936. # Parse out one emacs var per line.
  937. continued_for = None
  938. for line in lines[:-1]: # no var on the last line ("PREFIX End:")
  939. if prefix: line = line[len(prefix):] # strip prefix
  940. if suffix: line = line[:-len(suffix)] # strip suffix
  941. line = line.strip()
  942. if continued_for:
  943. variable = continued_for
  944. if line.endswith('\\'):
  945. line = line[:-1].rstrip()
  946. else:
  947. continued_for = None
  948. emacs_vars[variable] += ' ' + line
  949. else:
  950. try:
  951. variable, value = line.split(':', 1)
  952. except ValueError:
  953. log.debug("local variables error: missing colon "
  954. "in local variables entry: '%s'" % line)
  955. continue
  956. # Do NOT lowercase the variable name, because Emacs only
  957. # allows "mode" (and not "Mode", "MoDe", etc.) in this block.
  958. value = value.strip()
  959. if value.endswith('\\'):
  960. value = value[:-1].rstrip()
  961. continued_for = variable
  962. else:
  963. continued_for = None
  964. emacs_vars[variable] = value
  965. # Unquote values.
  966. for var, val in emacs_vars.items():
  967. if len(val) > 1 and (val.startswith('"') and val.endswith('"')
  968. or val.startswith("'") and val.endswith("'")):
  969. emacs_vars[var] = val[1:-1]
  970. self._emacs_tail_vars_cache = emacs_vars
  971. return emacs_vars
  972. # Note: It might be nice if the parser also gave which of 'vi, vim, ex' and
  973. # the range in the accessor.
  974. _vi_vars_pats_and_splitters = [
  975. (re.compile(r'[ \t]+(vi|vim([<>=]?\d{3})?|ex):\s*set? (?P<rhs>.*?)(?<!\\):', re.M),
  976. re.compile(r'[ \t]+')),
  977. (re.compile(r'[ \t]+(vi|vim([<>=]?\d{3})?|ex):\s*(?P<rhs>.*?)$', re.M),
  978. re.compile(r'[ \t:]+')),
  979. (re.compile(r'^(vi|vim([<>=]?\d{3})?):\s*set? (?P<rhs>.*?)(?<!\\):', re.M),
  980. re.compile(r'[ \t]+')),
  981. ]
  982. _vi_vars_cache = None
  983. def _get_vi_vars(self, bytes):
  984. r"""Return a dict of Vi[m] modeline vars.
  985. See ":help modeline" in Vim for a spec.
  986. >>> TextInfo()._get_vi_vars("/* vim: set ai tw=75: */")
  987. {'ai': None, 'tw': 75}
  988. >>> TextInfo()._get_vi_vars("vim: set ai tw=75: bar")
  989. {'ai': None, 'tw': 75}
  990. >>> TextInfo()._get_vi_vars("vi: set foo:bar")
  991. {'foo': None}
  992. >>> TextInfo()._get_vi_vars(" vi: se foo:bar")
  993. {'foo': None}
  994. >>> TextInfo()._get_vi_vars(" ex: se foo:bar")
  995. {'foo': None}
  996. >>> TextInfo()._get_vi_vars(" vi:noai:sw=3 tw=75")
  997. {'tw': 75, 'sw': 3, 'noai': None}
  998. >>> TextInfo()._get_vi_vars(" vi:noai:sw=3 tw=75")
  999. {'tw': 75, 'sw': 3, 'noai': None}
  1000. >>> TextInfo()._get_vi_vars("ex: se foo:bar")
  1001. {}
  1002. Some edge cases:
  1003. >>> TextInfo()._get_vi_vars(r"/* vi:set dir=c\:\tmp: */")
  1004. {'dir': 'c:\\tmp'}
  1005. """
  1006. # Presume 8-bit encoding... yada yada.
  1007. if self._vi_vars_cache is not None:
  1008. return self._vi_vars_cache
  1009. vi_vars = {}
  1010. #TODO: Consider reducing support to just "vi:" for speed. This
  1011. # function takes way too much time.
  1012. if "vi:" not in bytes and "ex:" not in bytes and "vim:" not in bytes:
  1013. self._vi_vars_cache = vi_vars
  1014. return vi_vars
  1015. for pat, splitter in self._vi_vars_pats_and_splitters:
  1016. match = pat.search(bytes)
  1017. if match:
  1018. for var_str in splitter.split(match.group("rhs")):
  1019. if '=' in var_str:
  1020. name, value = var_str.split('=', 1)
  1021. try:
  1022. vi_vars[name] = int(value)
  1023. except ValueError:
  1024. vi_vars[name] = value.replace('\\:', ':')
  1025. else:
  1026. vi_vars[var_str] = None
  1027. break
  1028. self._vi_vars_cache = vi_vars
  1029. return vi_vars
  1030. def _get_bom_info(self):
  1031. r"""Returns (<has-bom>, <bom>, <bom-encoding>). Examples:
  1032. (True, '\xef\xbb\xbf', "utf-8")
  1033. (True, '\xff\xfe', "utf-16-le")
  1034. (False, None, None)
  1035. """
  1036. boms_and_encodings = [ # in order from longest to shortest
  1037. (codecs.BOM_UTF32_LE, "utf-32-le"),
  1038. (codecs.BOM_UTF32_BE, "utf-32-be"),
  1039. (codecs.BOM_UTF8, "utf-8"),
  1040. (codecs.BOM_UTF16_LE, "utf-16-le"),
  1041. (codecs.BOM_UTF16_BE, "utf-16-be"),
  1042. ]
  1043. head_4 = self._accessor.head_4_bytes
  1044. for bom, encoding in boms_and_encodings:
  1045. if head_4.startswith(bom):
  1046. return (True, bom, encoding)
  1048. else:
  1049. return (False, None, None)
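# The "longest to shortest" ordering above matters: the UTF-32-LE BOM
# ('\xff\xfe\x00\x00') begins with the UTF-16-LE BOM ('\xff\xfe'), so
# checking the 2-byte BOMs first would misclassify UTF-32-LE as UTF-16-LE.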
  1050. def _classify_from_filename(self, lidb, env=None):
  1051. """Classify from the path *filename* only.
