PageRenderTime 59ms CodeModel.GetById 19ms RepoModel.GetById 0ms app.codeStats 1ms

/lib-python/2.7/codecs.py

https://bitbucket.org/dac_io/pypy
Python | 1098 lines | 1036 code | 25 blank | 37 comment | 4 complexity | 696baaac4728ba1a100419b4f696a4dd MD5 | raw file
  1. """ codecs -- Python Codec Registry, API and helpers.
  2. Written by Marc-Andre Lemburg (mal@lemburg.com).
  3. (c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
  4. """#"
  5. import __builtin__, sys
  6. ### Registry and builtin stateless codec functions
  7. try:
  8. from _codecs import *
  9. except ImportError, why:
  10. raise SystemError('Failed to load the builtin codecs: %s' % why)
  11. __all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
  12. "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
  13. "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
  14. "BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
  15. "strict_errors", "ignore_errors", "replace_errors",
  16. "xmlcharrefreplace_errors",
  17. "register_error", "lookup_error"]
  18. ### Constants
  19. #
  20. # Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
  21. # and its possible byte string values
  22. # for UTF8/UTF16/UTF32 output and little/big endian machines
  23. #
  24. # UTF-8
  25. BOM_UTF8 = '\xef\xbb\xbf'
  26. # UTF-16, little endian
  27. BOM_LE = BOM_UTF16_LE = '\xff\xfe'
  28. # UTF-16, big endian
  29. BOM_BE = BOM_UTF16_BE = '\xfe\xff'
  30. # UTF-32, little endian
  31. BOM_UTF32_LE = '\xff\xfe\x00\x00'
  32. # UTF-32, big endian
  33. BOM_UTF32_BE = '\x00\x00\xfe\xff'
  34. if sys.byteorder == 'little':
  35. # UTF-16, native endianness
  36. BOM = BOM_UTF16 = BOM_UTF16_LE
  37. # UTF-32, native endianness
  38. BOM_UTF32 = BOM_UTF32_LE
  39. else:
  40. # UTF-16, native endianness
  41. BOM = BOM_UTF16 = BOM_UTF16_BE
  42. # UTF-32, native endianness
  43. BOM_UTF32 = BOM_UTF32_BE
  44. # Old broken names (don't use in new code)
  45. BOM32_LE = BOM_UTF16_LE
  46. BOM32_BE = BOM_UTF16_BE
  47. BOM64_LE = BOM_UTF32_LE
  48. BOM64_BE = BOM_UTF32_BE
  49. ### Codec base classes (defining the API)
  50. class CodecInfo(tuple):
  51. def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
  52. incrementalencoder=None, incrementaldecoder=None, name=None):
  53. self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
  54. self.name = name
  55. self.encode = encode
  56. self.decode = decode
  57. self.incrementalencoder = incrementalencoder
  58. self.incrementaldecoder = incrementaldecoder
  59. self.streamwriter = streamwriter
  60. self.streamreader = streamreader
  61. return self
  62. def __repr__(self):
  63. return "<%s.%s object for encoding %s at 0x%x>" % (self.__class__.__module__, self.__class__.__name__, self.name, id(self))
  64. class Codec:
  65. """ Defines the interface for stateless encoders/decoders.
  66. The .encode()/.decode() methods may use different error
  67. handling schemes by providing the errors argument. These
  68. string values are predefined:
  69. 'strict' - raise a ValueError error (or a subclass)
  70. 'ignore' - ignore the character and continue with the next
  71. 'replace' - replace with a suitable replacement character;
  72. Python will use the official U+FFFD REPLACEMENT
  73. CHARACTER for the builtin Unicode codecs on
  74. decoding and '?' on encoding.
  75. 'xmlcharrefreplace' - Replace with the appropriate XML
  76. character reference (only for encoding).
  77. 'backslashreplace' - Replace with backslashed escape sequences
  78. (only for encoding).
  79. The set of allowed values can be extended via register_error.
  80. """
  81. def encode(self, input, errors='strict'):
  82. """ Encodes the object input and returns a tuple (output
  83. object, length consumed).
  84. errors defines the error handling to apply. It defaults to
  85. 'strict' handling.
  86. The method may not store state in the Codec instance. Use
  87. StreamCodec for codecs which have to keep state in order to
  88. make encoding/decoding efficient.
  89. The encoder must be able to handle zero length input and
  90. return an empty object of the output object type in this
  91. situation.
  92. """
  93. raise NotImplementedError
  94. def decode(self, input, errors='strict'):
  95. """ Decodes the object input and returns a tuple (output
  96. object, length consumed).
  97. input must be an object which provides the bf_getreadbuf
  98. buffer slot. Python strings, buffer objects and memory
  99. mapped files are examples of objects providing this slot.
  100. errors defines the error handling to apply. It defaults to
  101. 'strict' handling.
  102. The method may not store state in the Codec instance. Use
  103. StreamCodec for codecs which have to keep state in order to
  104. make encoding/decoding efficient.
  105. The decoder must be able to handle zero length input and
  106. return an empty object of the output object type in this
  107. situation.
  108. """
  109. raise NotImplementedError
  110. class IncrementalEncoder(object):
  111. """
  112. An IncrementalEncoder encodes an input in multiple steps. The input can be
  113. passed piece by piece to the encode() method. The IncrementalEncoder remembers
  114. the state of the Encoding process between calls to encode().
  115. """
  116. def __init__(self, errors='strict'):
  117. """
  118. Creates an IncrementalEncoder instance.
  119. The IncrementalEncoder may use different error handling schemes by
  120. providing the errors keyword argument. See the module docstring
  121. for a list of possible values.
  122. """
  123. self.errors = errors
  124. self.buffer = ""
  125. def encode(self, input, final=False):
  126. """
  127. Encodes input and returns the resulting object.
  128. """
  129. raise NotImplementedError
  130. def reset(self):
  131. """
  132. Resets the encoder to the initial state.
  133. """
  134. def getstate(self):
  135. """
  136. Return the current state of the encoder.
  137. """
  138. return 0
  139. def setstate(self, state):
  140. """
  141. Set the current state of the encoder. state must have been
  142. returned by getstate().
  143. """
  144. class BufferedIncrementalEncoder(IncrementalEncoder):
  145. """
  146. This subclass of IncrementalEncoder can be used as the baseclass for an
  147. incremental encoder if the encoder must keep some of the output in a
  148. buffer between calls to encode().
  149. """
  150. def __init__(self, errors='strict'):
  151. IncrementalEncoder.__init__(self, errors)
  152. self.buffer = "" # unencoded input that is kept between calls to encode()
  153. def _buffer_encode(self, input, errors, final):
  154. # Overwrite this method in subclasses: It must encode input
  155. # and return an (output, length consumed) tuple
  156. raise NotImplementedError
  157. def encode(self, input, final=False):
  158. # encode input (taking the buffer into account)
  159. data = self.buffer + input
  160. (result, consumed) = self._buffer_encode(data, self.errors, final)
  161. # keep unencoded input until the next call
  162. self.buffer = data[consumed:]
  163. return result
  164. def reset(self):
  165. IncrementalEncoder.reset(self)
  166. self.buffer = ""
  167. def getstate(self):
  168. return self.buffer or 0
  169. def setstate(self, state):
  170. self.buffer = state or ""
  171. class IncrementalDecoder(object):
  172. """
  173. An IncrementalDecoder decodes an input in multiple steps. The input can be
  174. passed piece by piece to the decode() method. The IncrementalDecoder
  175. remembers the state of the decoding process between calls to decode().
  176. """
  177. def __init__(self, errors='strict'):
  178. """
  179. Creates a IncrementalDecoder instance.
  180. The IncrementalDecoder may use different error handling schemes by
  181. providing the errors keyword argument. See the module docstring
  182. for a list of possible values.
  183. """
  184. self.errors = errors
  185. def decode(self, input, final=False):
  186. """
  187. Decodes input and returns the resulting object.
  188. """
  189. raise NotImplementedError
  190. def reset(self):
  191. """
  192. Resets the decoder to the initial state.
  193. """
  194. def getstate(self):
  195. """
  196. Return the current state of the decoder.
  197. This must be a (buffered_input, additional_state_info) tuple.
  198. buffered_input must be a bytes object containing bytes that
  199. were passed to decode() that have not yet been converted.
  200. additional_state_info must be a non-negative integer
  201. representing the state of the decoder WITHOUT yet having
  202. processed the contents of buffered_input. In the initial state
  203. and after reset(), getstate() must return (b"", 0).
  204. """
  205. return (b"", 0)
  206. def setstate(self, state):
  207. """
  208. Set the current state of the decoder.
  209. state must have been returned by getstate(). The effect of
  210. setstate((b"", 0)) must be equivalent to reset().
  211. """
  212. class BufferedIncrementalDecoder(IncrementalDecoder):
  213. """
  214. This subclass of IncrementalDecoder can be used as the baseclass for an
  215. incremental decoder if the decoder must be able to handle incomplete byte
  216. sequences.
  217. """
  218. def __init__(self, errors='strict'):
  219. IncrementalDecoder.__init__(self, errors)
  220. self.buffer = "" # undecoded input that is kept between calls to decode()
  221. def _buffer_decode(self, input, errors, final):
  222. # Overwrite this method in subclasses: It must decode input
  223. # and return an (output, length consumed) tuple
  224. raise NotImplementedError
  225. def decode(self, input, final=False):
  226. # decode input (taking the buffer into account)
  227. data = self.buffer + input
  228. (result, consumed) = self._buffer_decode(data, self.errors, final)
  229. # keep undecoded input until the next call
  230. self.buffer = data[consumed:]
  231. return result
  232. def reset(self):
  233. IncrementalDecoder.reset(self)
  234. self.buffer = ""
  235. def getstate(self):
  236. # additional state info is always 0
  237. return (self.buffer, 0)
  238. def setstate(self, state):
  239. # ignore additional state info
  240. self.buffer = state[0]
  241. #
  242. # The StreamWriter and StreamReader class provide generic working
  243. # interfaces which can be used to implement new encoding submodules
  244. # very easily. See encodings/utf_8.py for an example on how this is
  245. # done.
  246. #
  247. class StreamWriter(Codec):
  248. def __init__(self, stream, errors='strict'):
  249. """ Creates a StreamWriter instance.
  250. stream must be a file-like object open for writing
  251. (binary) data.
  252. The StreamWriter may use different error handling
  253. schemes by providing the errors keyword argument. These
  254. parameters are predefined:
  255. 'strict' - raise a ValueError (or a subclass)
  256. 'ignore' - ignore the character and continue with the next
  257. 'replace'- replace with a suitable replacement character
  258. 'xmlcharrefreplace' - Replace with the appropriate XML
  259. character reference.
  260. 'backslashreplace' - Replace with backslashed escape
  261. sequences (only for encoding).
  262. The set of allowed parameter values can be extended via
  263. register_error.
  264. """
  265. self.stream = stream
  266. self.errors = errors
  267. def write(self, object):
  268. """ Writes the object's contents encoded to self.stream.
  269. """
  270. data, consumed = self.encode(object, self.errors)
  271. self.stream.write(data)
  272. def writelines(self, list):
  273. """ Writes the concatenated list of strings to the stream
  274. using .write().
  275. """
  276. self.write(''.join(list))
  277. def reset(self):
  278. """ Flushes and resets the codec buffers used for keeping state.
  279. Calling this method should ensure that the data on the
  280. output is put into a clean state, that allows appending
  281. of new fresh data without having to rescan the whole
  282. stream to recover state.
  283. """
  284. pass
  285. def seek(self, offset, whence=0):
  286. self.stream.seek(offset, whence)
  287. if whence == 0 and offset == 0:
  288. self.reset()
  289. def __getattr__(self, name,
  290. getattr=getattr):
  291. """ Inherit all other methods from the underlying stream.
  292. """
  293. return getattr(self.stream, name)
  294. def __enter__(self):
  295. return self
  296. def __exit__(self, type, value, tb):
  297. self.stream.close()
  298. ###
  299. class StreamReader(Codec):
  300. def __init__(self, stream, errors='strict'):
  301. """ Creates a StreamReader instance.
  302. stream must be a file-like object open for reading
  303. (binary) data.
  304. The StreamReader may use different error handling
  305. schemes by providing the errors keyword argument. These
  306. parameters are predefined:
  307. 'strict' - raise a ValueError (or a subclass)
  308. 'ignore' - ignore the character and continue with the next
  309. 'replace'- replace with a suitable replacement character;
  310. The set of allowed parameter values can be extended via
  311. register_error.
  312. """
  313. self.stream = stream
  314. self.errors = errors
  315. self.bytebuffer = ""
  316. # For str->str decoding this will stay a str
  317. # For str->unicode decoding the first read will promote it to unicode
  318. self.charbuffer = ""
  319. self.linebuffer = None
  320. def decode(self, input, errors='strict'):
  321. raise NotImplementedError
  322. def read(self, size=-1, chars=-1, firstline=False):
  323. """ Decodes data from the stream self.stream and returns the
  324. resulting object.
  325. chars indicates the number of characters to read from the
  326. stream. read() will never return more than chars
  327. characters, but it might return less, if there are not enough
  328. characters available.
  329. size indicates the approximate maximum number of bytes to
  330. read from the stream for decoding purposes. The decoder
  331. can modify this setting as appropriate. The default value
  332. -1 indicates to read and decode as much as possible. size
  333. is intended to prevent having to decode huge files in one
  334. step.
  335. If firstline is true, and a UnicodeDecodeError happens
  336. after the first line terminator in the input only the first line
  337. will be returned, the rest of the input will be kept until the
  338. next call to read().
  339. The method should use a greedy read strategy meaning that
  340. it should read as much data as is allowed within the
  341. definition of the encoding and the given size, e.g. if
  342. optional encoding endings or state markers are available
  343. on the stream, these should be read too.
  344. """
  345. # If we have lines cached, first merge them back into characters
  346. if self.linebuffer:
  347. self.charbuffer = "".join(self.linebuffer)
  348. self.linebuffer = None
  349. # read until we get the required number of characters (if available)
  350. while True:
  351. # can the request can be satisfied from the character buffer?
  352. if chars < 0:
  353. if size < 0:
  354. if self.charbuffer:
  355. break
  356. elif len(self.charbuffer) >= size:
  357. break
  358. else:
  359. if len(self.charbuffer) >= chars:
  360. break
  361. # we need more data
  362. if size < 0:
  363. newdata = self.stream.read()
  364. else:
  365. newdata = self.stream.read(size)
  366. # decode bytes (those remaining from the last call included)
  367. data = self.bytebuffer + newdata
  368. try:
  369. newchars, decodedbytes = self.decode(data, self.errors)
  370. except UnicodeDecodeError, exc:
  371. if firstline:
  372. newchars, decodedbytes = self.decode(data[:exc.start], self.errors)
  373. lines = newchars.splitlines(True)
  374. if len(lines)<=1:
  375. raise
  376. else:
  377. raise
  378. # keep undecoded bytes until the next call
  379. self.bytebuffer = data[decodedbytes:]
  380. # put new characters in the character buffer
  381. self.charbuffer += newchars
  382. # there was no data available
  383. if not newdata:
  384. break
  385. if chars < 0:
  386. # Return everything we've got
  387. result = self.charbuffer
  388. self.charbuffer = ""
  389. else:
  390. # Return the first chars characters
  391. result = self.charbuffer[:chars]
  392. self.charbuffer = self.charbuffer[chars:]
  393. return result
  394. def readline(self, size=None, keepends=True):
  395. """ Read one line from the input stream and return the
  396. decoded data.
  397. size, if given, is passed as size argument to the
  398. read() method.
  399. """
  400. # If we have lines cached from an earlier read, return
  401. # them unconditionally
  402. if self.linebuffer:
  403. line = self.linebuffer[0]
  404. del self.linebuffer[0]
  405. if len(self.linebuffer) == 1:
  406. # revert to charbuffer mode; we might need more data
  407. # next time
  408. self.charbuffer = self.linebuffer[0]
  409. self.linebuffer = None
  410. if not keepends:
  411. line = line.splitlines(False)[0]
  412. return line
  413. readsize = size or 72
  414. line = ""
  415. # If size is given, we call read() only once
  416. while True:
  417. data = self.read(readsize, firstline=True)
  418. if data:
  419. # If we're at a "\r" read one extra character (which might
  420. # be a "\n") to get a proper line ending. If the stream is
  421. # temporarily exhausted we return the wrong line ending.
  422. if data.endswith("\r"):
  423. data += self.read(size=1, chars=1)
  424. line += data
  425. lines = line.splitlines(True)
  426. if lines:
  427. if len(lines) > 1:
  428. # More than one line result; the first line is a full line
  429. # to return
  430. line = lines[0]
  431. del lines[0]
  432. if len(lines) > 1:
  433. # cache the remaining lines
  434. lines[-1] += self.charbuffer
  435. self.linebuffer = lines
  436. self.charbuffer = None
  437. else:
  438. # only one remaining line, put it back into charbuffer
  439. self.charbuffer = lines[0] + self.charbuffer
  440. if not keepends:
  441. line = line.splitlines(False)[0]
  442. break
  443. line0withend = lines[0]
  444. line0withoutend = lines[0].splitlines(False)[0]
  445. if line0withend != line0withoutend: # We really have a line end
  446. # Put the rest back together and keep it until the next call
  447. self.charbuffer = "".join(lines[1:]) + self.charbuffer
  448. if keepends:
  449. line = line0withend
  450. else:
  451. line = line0withoutend
  452. break
  453. # we didn't get anything or this was our only try
  454. if not data or size is not None:
  455. if line and not keepends:
  456. line = line.splitlines(False)[0]
  457. break
  458. if readsize<8000:
  459. readsize *= 2
  460. return line
  461. def readlines(self, sizehint=None, keepends=True):
  462. """ Read all lines available on the input stream
  463. and return them as list of lines.
  464. Line breaks are implemented using the codec's decoder
  465. method and are included in the list entries.
  466. sizehint, if given, is ignored since there is no efficient
  467. way to finding the true end-of-line.
  468. """
  469. data = self.read()
  470. return data.splitlines(keepends)
  471. def reset(self):
  472. """ Resets the codec buffers used for keeping state.
  473. Note that no stream repositioning should take place.
  474. This method is primarily intended to be able to recover
  475. from decoding errors.
  476. """
  477. self.bytebuffer = ""
  478. self.charbuffer = u""
  479. self.linebuffer = None
  480. def seek(self, offset, whence=0):
  481. """ Set the input stream's current position.
  482. Resets the codec buffers used for keeping state.
  483. """
  484. self.stream.seek(offset, whence)
  485. self.reset()
  486. def next(self):
  487. """ Return the next decoded line from the input stream."""
  488. line = self.readline()
  489. if line:
  490. return line
  491. raise StopIteration
  492. def __iter__(self):
  493. return self
  494. def __getattr__(self, name,
  495. getattr=getattr):
  496. """ Inherit all other methods from the underlying stream.
  497. """
  498. return getattr(self.stream, name)
  499. def __enter__(self):
  500. return self
  501. def __exit__(self, type, value, tb):
  502. self.stream.close()
  503. ###
  504. class StreamReaderWriter:
  505. """ StreamReaderWriter instances allow wrapping streams which
  506. work in both read and write modes.
  507. The design is such that one can use the factory functions
  508. returned by the codec.lookup() function to construct the
  509. instance.
  510. """
  511. # Optional attributes set by the file wrappers below
  512. encoding = 'unknown'
  513. def __init__(self, stream, Reader, Writer, errors='strict'):
  514. """ Creates a StreamReaderWriter instance.
  515. stream must be a Stream-like object.
  516. Reader, Writer must be factory functions or classes
  517. providing the StreamReader, StreamWriter interface resp.
  518. Error handling is done in the same way as defined for the
  519. StreamWriter/Readers.
  520. """
  521. self.stream = stream
  522. self.reader = Reader(stream, errors)
  523. self.writer = Writer(stream, errors)
  524. self.errors = errors
  525. def read(self, size=-1):
  526. return self.reader.read(size)
  527. def readline(self, size=None):
  528. return self.reader.readline(size)
  529. def readlines(self, sizehint=None):
  530. return self.reader.readlines(sizehint)
  531. def next(self):
  532. """ Return the next decoded line from the input stream."""
  533. return self.reader.next()
  534. def __iter__(self):
  535. return self
  536. def write(self, data):
  537. return self.writer.write(data)
  538. def writelines(self, list):
  539. return self.writer.writelines(list)
  540. def reset(self):
  541. self.reader.reset()
  542. self.writer.reset()
  543. def seek(self, offset, whence=0):
  544. self.stream.seek(offset, whence)
  545. self.reader.reset()
  546. if whence == 0 and offset == 0:
  547. self.writer.reset()
  548. def __getattr__(self, name,
  549. getattr=getattr):
  550. """ Inherit all other methods from the underlying stream.
  551. """
  552. return getattr(self.stream, name)
  553. # these are needed to make "with codecs.open(...)" work properly
  554. def __enter__(self):
  555. return self
  556. def __exit__(self, type, value, tb):
  557. self.stream.close()
  558. ###
  559. class StreamRecoder:
  560. """ StreamRecoder instances provide a frontend - backend
  561. view of encoding data.
  562. They use the complete set of APIs returned by the
  563. codecs.lookup() function to implement their task.
  564. Data written to the stream is first decoded into an
  565. intermediate format (which is dependent on the given codec
  566. combination) and then written to the stream using an instance
  567. of the provided Writer class.
  568. In the other direction, data is read from the stream using a
  569. Reader instance and then return encoded data to the caller.
  570. """
  571. # Optional attributes set by the file wrappers below
  572. data_encoding = 'unknown'
  573. file_encoding = 'unknown'
  574. def __init__(self, stream, encode, decode, Reader, Writer,
  575. errors='strict'):
  576. """ Creates a StreamRecoder instance which implements a two-way
  577. conversion: encode and decode work on the frontend (the
  578. input to .read() and output of .write()) while
  579. Reader and Writer work on the backend (reading and
  580. writing to the stream).
  581. You can use these objects to do transparent direct
  582. recodings from e.g. latin-1 to utf-8 and back.
  583. stream must be a file-like object.
  584. encode, decode must adhere to the Codec interface, Reader,
  585. Writer must be factory functions or classes providing the
  586. StreamReader, StreamWriter interface resp.
  587. encode and decode are needed for the frontend translation,
  588. Reader and Writer for the backend translation. Unicode is
  589. used as intermediate encoding.
  590. Error handling is done in the same way as defined for the
  591. StreamWriter/Readers.
  592. """
  593. self.stream = stream
  594. self.encode = encode
  595. self.decode = decode
  596. self.reader = Reader(stream, errors)
  597. self.writer = Writer(stream, errors)
  598. self.errors = errors
  599. def read(self, size=-1):
  600. data = self.reader.read(size)
  601. data, bytesencoded = self.encode(data, self.errors)
  602. return data
  603. def readline(self, size=None):
  604. if size is None:
  605. data = self.reader.readline()
  606. else:
  607. data = self.reader.readline(size)
  608. data, bytesencoded = self.encode(data, self.errors)
  609. return data
  610. def readlines(self, sizehint=None):
  611. data = self.reader.read()
  612. data, bytesencoded = self.encode(data, self.errors)
  613. return data.splitlines(1)
  614. def next(self):
  615. """ Return the next decoded line from the input stream."""
  616. data = self.reader.next()
  617. data, bytesencoded = self.encode(data, self.errors)
  618. return data
  619. def __iter__(self):
  620. return self
  621. def write(self, data):
  622. data, bytesdecoded = self.decode(data, self.errors)
  623. return self.writer.write(data)
  624. def writelines(self, list):
  625. data = ''.join(list)
  626. data, bytesdecoded = self.decode(data, self.errors)
  627. return self.writer.write(data)
  628. def reset(self):
  629. self.reader.reset()
  630. self.writer.reset()
  631. def __getattr__(self, name,
  632. getattr=getattr):
  633. """ Inherit all other methods from the underlying stream.
  634. """
  635. return getattr(self.stream, name)
  636. def __enter__(self):
  637. return self
  638. def __exit__(self, type, value, tb):
  639. self.stream.close()
  640. ### Shortcuts
  641. def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
  642. """ Open an encoded file using the given mode and return
  643. a wrapped version providing transparent encoding/decoding.
  644. Note: The wrapped version will only accept the object format
  645. defined by the codecs, i.e. Unicode objects for most builtin
  646. codecs. Output is also codec dependent and will usually be
  647. Unicode as well.
  648. Files are always opened in binary mode, even if no binary mode
  649. was specified. This is done to avoid data loss due to encodings
  650. using 8-bit values. The default file mode is 'rb' meaning to
  651. open the file in binary read mode.
  652. encoding specifies the encoding which is to be used for the
  653. file.
  654. errors may be given to define the error handling. It defaults
  655. to 'strict' which causes ValueErrors to be raised in case an
  656. encoding error occurs.
  657. buffering has the same meaning as for the builtin open() API.
  658. It defaults to line buffered.
  659. The returned wrapped file object provides an extra attribute
  660. .encoding which allows querying the used encoding. This
  661. attribute is only available if an encoding was specified as
  662. parameter.
  663. """
  664. if encoding is not None:
  665. if 'U' in mode:
  666. # No automatic conversion of '\n' is done on reading and writing
  667. mode = mode.strip().replace('U', '')
  668. if mode[:1] not in set('rwa'):
  669. mode = 'r' + mode
  670. if 'b' not in mode:
  671. # Force opening of the file in binary mode
  672. mode = mode + 'b'
  673. file = __builtin__.open(filename, mode, buffering)
  674. if encoding is None:
  675. return file
  676. info = lookup(encoding)
  677. srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
  678. # Add attributes to simplify introspection
  679. srw.encoding = encoding
  680. return srw
  681. def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
  682. """ Return a wrapped version of file which provides transparent
  683. encoding translation.
  684. Strings written to the wrapped file are interpreted according
  685. to the given data_encoding and then written to the original
  686. file as string using file_encoding. The intermediate encoding
  687. will usually be Unicode but depends on the specified codecs.
  688. Strings are read from the file using file_encoding and then
  689. passed back to the caller as string using data_encoding.
  690. If file_encoding is not given, it defaults to data_encoding.
  691. errors may be given to define the error handling. It defaults
  692. to 'strict' which causes ValueErrors to be raised in case an
  693. encoding error occurs.
  694. The returned wrapped file object provides two extra attributes
  695. .data_encoding and .file_encoding which reflect the given
  696. parameters of the same name. The attributes can be used for
  697. introspection by Python programs.
  698. """
  699. if file_encoding is None:
  700. file_encoding = data_encoding
  701. data_info = lookup(data_encoding)
  702. file_info = lookup(file_encoding)
  703. sr = StreamRecoder(file, data_info.encode, data_info.decode,
  704. file_info.streamreader, file_info.streamwriter, errors)
  705. # Add attributes to simplify introspection
  706. sr.data_encoding = data_encoding
  707. sr.file_encoding = file_encoding
  708. return sr
  709. ### Helpers for codec lookup
  710. def getencoder(encoding):
  711. """ Lookup up the codec for the given encoding and return
  712. its encoder function.
  713. Raises a LookupError in case the encoding cannot be found.
  714. """
  715. return lookup(encoding).encode
  716. def getdecoder(encoding):
  717. """ Lookup up the codec for the given encoding and return
  718. its decoder function.
  719. Raises a LookupError in case the encoding cannot be found.
  720. """
  721. return lookup(encoding).decode
  722. def getincrementalencoder(encoding):
  723. """ Lookup up the codec for the given encoding and return
  724. its IncrementalEncoder class or factory function.
  725. Raises a LookupError in case the encoding cannot be found
  726. or the codecs doesn't provide an incremental encoder.
  727. """
  728. encoder = lookup(encoding).incrementalencoder
  729. if encoder is None:
  730. raise LookupError(encoding)
  731. return encoder
  732. def getincrementaldecoder(encoding):
  733. """ Lookup up the codec for the given encoding and return
  734. its IncrementalDecoder class or factory function.
  735. Raises a LookupError in case the encoding cannot be found
  736. or the codecs doesn't provide an incremental decoder.
  737. """
  738. decoder = lookup(encoding).incrementaldecoder
  739. if decoder is None:
  740. raise LookupError(encoding)
  741. return decoder
  742. def getreader(encoding):
  743. """ Lookup up the codec for the given encoding and return
  744. its StreamReader class or factory function.
  745. Raises a LookupError in case the encoding cannot be found.
  746. """
  747. return lookup(encoding).streamreader
  748. def getwriter(encoding):
  749. """ Lookup up the codec for the given encoding and return
  750. its StreamWriter class or factory function.
  751. Raises a LookupError in case the encoding cannot be found.
  752. """
  753. return lookup(encoding).streamwriter
  754. def iterencode(iterator, encoding, errors='strict', **kwargs):
  755. """
  756. Encoding iterator.
  757. Encodes the input strings from the iterator using a IncrementalEncoder.
  758. errors and kwargs are passed through to the IncrementalEncoder
  759. constructor.
  760. """
  761. encoder = getincrementalencoder(encoding)(errors, **kwargs)
  762. for input in iterator:
  763. output = encoder.encode(input)
  764. if output:
  765. yield output
  766. output = encoder.encode("", True)
  767. if output:
  768. yield output
  769. def iterdecode(iterator, encoding, errors='strict', **kwargs):
  770. """
  771. Decoding iterator.
  772. Decodes the input strings from the iterator using a IncrementalDecoder.
  773. errors and kwargs are passed through to the IncrementalDecoder
  774. constructor.
  775. """
  776. decoder = getincrementaldecoder(encoding)(errors, **kwargs)
  777. for input in iterator:
  778. output = decoder.decode(input)
  779. if output:
  780. yield output
  781. output = decoder.decode("", True)
  782. if output:
  783. yield output
  784. ### Helpers for charmap-based codecs
  785. def make_identity_dict(rng):
  786. """ make_identity_dict(rng) -> dict
  787. Return a dictionary where elements of the rng sequence are
  788. mapped to themselves.
  789. """
  790. res = {}
  791. for i in rng:
  792. res[i]=i
  793. return res
  794. def make_encoding_map(decoding_map):
  795. """ Creates an encoding map from a decoding map.
  796. If a target mapping in the decoding map occurs multiple
  797. times, then that target is mapped to None (undefined mapping),
  798. causing an exception when encountered by the charmap codec
  799. during translation.
  800. One example where this happens is cp875.py which decodes
  801. multiple character to \u001a.
  802. """
  803. m = {}
  804. for k,v in decoding_map.items():
  805. if not v in m:
  806. m[v] = k
  807. else:
  808. m[v] = None
  809. return m
  810. ### error handlers
  811. try:
  812. strict_errors = lookup_error("strict")
  813. ignore_errors = lookup_error("ignore")
  814. replace_errors = lookup_error("replace")
  815. xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
  816. backslashreplace_errors = lookup_error("backslashreplace")
  817. except LookupError:
  818. # In --disable-unicode builds, these error handler are missing
  819. strict_errors = None
  820. ignore_errors = None
  821. replace_errors = None
  822. xmlcharrefreplace_errors = None
  823. backslashreplace_errors = None
# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
    import encodings

### Tests

if __name__ == '__main__':

    # Smoke test: recode both standard streams through EncodedFile.
    # Make stdout translate Latin-1 output into UTF-8 output
    sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')

    # Have stdin translate Latin-1 input into UTF-8 input
    sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')