
/lib-python/modified-2.7/test/test_unicode.py

https://bitbucket.org/dac_io/pypy
Python | 1406 lines | 1328 code | 28 blank | 50 comment | 21 complexity | 55bdadb49e8cdeb776b88be9b7a33a75 MD5

Large files are truncated in this listing; the full file is available at the repository linked above.
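For orientation, the listing below is the UnicodeTest suite from PyPy's modified copy of CPython 2.7's test_unicode.py. As a hedged sketch (not part of the file shown), the suite could be driven directly under a Python 2.7 interpreter roughly as follows, assuming the module is importable as test.test_unicode:

    # Illustrative only: load and run the UnicodeTest class defined below.
    import unittest
    from test import test_unicode

    suite = unittest.TestLoader().loadTestsFromTestCase(test_unicode.UnicodeTest)
    unittest.TextTestRunner(verbosity=2).run(suite)

In a stock CPython 2.7 checkout the usual driver is the stdlib regression-test runner (python -m test.regrtest test_unicode); the direct unittest invocation above is just an assumed, minimal alternative.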

  1. """ Test script for the Unicode implementation.
  2. Written by Marc-Andre Lemburg (mal@lemburg.com).
  3. (c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
  4. """#"
  5. import sys, struct, codecs
  6. from test import test_support, string_tests
  7. # Error handling (bad decoder return)
  8. def search_function(encoding):
  9. def decode1(input, errors="strict"):
  10. return 42 # not a tuple
  11. def encode1(input, errors="strict"):
  12. return 42 # not a tuple
  13. def encode2(input, errors="strict"):
  14. return (42, 42) # no unicode
  15. def decode2(input, errors="strict"):
  16. return (42, 42) # no unicode
  17. if encoding=="test.unicode1":
  18. return (encode1, decode1, None, None)
  19. elif encoding=="test.unicode2":
  20. return (encode2, decode2, None, None)
  21. else:
  22. return None
  23. codecs.register(search_function)
  24. class UnicodeTest(
  25. string_tests.CommonTest,
  26. string_tests.MixinStrUnicodeUserStringTest,
  27. string_tests.MixinStrUnicodeTest,
  28. ):
  29. type2test = unicode
  30. def assertEqual(self, first, second, msg=None):
  31. # strict assertEqual method: reject implicit bytes/unicode equality
  32. super(UnicodeTest, self).assertEqual(first, second, msg)
  33. if isinstance(first, unicode) or isinstance(second, unicode):
  34. self.assertIsInstance(first, unicode)
  35. self.assertIsInstance(second, unicode)
  36. elif isinstance(first, str) or isinstance(second, str):
  37. self.assertIsInstance(first, str)
  38. self.assertIsInstance(second, str)
  39. def checkequalnofix(self, result, object, methodname, *args):
  40. method = getattr(object, methodname)
  41. realresult = method(*args)
  42. self.assertEqual(realresult, result)
  43. self.assertTrue(type(realresult) is type(result))
  44. # if the original is returned make sure that
  45. # this doesn't happen with subclasses
  46. if realresult is object:
  47. class usub(unicode):
  48. def __repr__(self):
  49. return 'usub(%r)' % unicode.__repr__(self)
  50. object = usub(object)
  51. method = getattr(object, methodname)
  52. realresult = method(*args)
  53. self.assertEqual(realresult, result)
  54. self.assertTrue(object is not realresult)
  55. def test_literals(self):
  56. self.assertEqual(u'\xff', u'\u00ff')
  57. self.assertEqual(u'\uffff', u'\U0000ffff')
  58. self.assertRaises(SyntaxError, eval, 'u\'\\Ufffffffe\'')
  59. self.assertRaises(SyntaxError, eval, 'u\'\\Uffffffff\'')
  60. self.assertRaises(SyntaxError, eval, 'u\'\\U%08x\'' % 0x110000)
  61. def test_repr(self):
  62. if not sys.platform.startswith('java'):
  63. # Test basic sanity of repr()
  64. self.assertEqual(repr(u'abc'), "u'abc'")
  65. self.assertEqual(repr(u'ab\\c'), "u'ab\\\\c'")
  66. self.assertEqual(repr(u'ab\\'), "u'ab\\\\'")
  67. self.assertEqual(repr(u'\\c'), "u'\\\\c'")
  68. self.assertEqual(repr(u'\\'), "u'\\\\'")
  69. self.assertEqual(repr(u'\n'), "u'\\n'")
  70. self.assertEqual(repr(u'\r'), "u'\\r'")
  71. self.assertEqual(repr(u'\t'), "u'\\t'")
  72. self.assertEqual(repr(u'\b'), "u'\\x08'")
  73. self.assertEqual(repr(u"'\""), """u'\\'"'""")
  74. self.assertEqual(repr(u"'\""), """u'\\'"'""")
  75. self.assertEqual(repr(u"'"), '''u"'"''')
  76. self.assertEqual(repr(u'"'), """u'"'""")
  77. latin1repr = (
  78. "u'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
  79. "\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
  80. "\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
  81. "JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
  82. "\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
  83. "\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
  84. "\\x9c\\x9d\\x9e\\x9f\\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\\xa8\\xa9"
  85. "\\xaa\\xab\\xac\\xad\\xae\\xaf\\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7"
  86. "\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\\xc0\\xc1\\xc2\\xc3\\xc4\\xc5"
  87. "\\xc6\\xc7\\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\\xd0\\xd1\\xd2\\xd3"
  88. "\\xd4\\xd5\\xd6\\xd7\\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\\xe0\\xe1"
  89. "\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef"
  90. "\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd"
  91. "\\xfe\\xff'")
  92. testrepr = repr(u''.join(map(unichr, xrange(256))))
  93. self.assertEqual(testrepr, latin1repr)
  94. # Test repr works on wide unicode escapes without overflow.
  95. self.assertEqual(repr(u"\U00010000" * 39 + u"\uffff" * 4096),
  96. repr(u"\U00010000" * 39 + u"\uffff" * 4096))
  97. def test_count(self):
  98. string_tests.CommonTest.test_count(self)
  99. # check mixed argument types
  100. self.checkequalnofix(3, 'aaa', 'count', u'a')
  101. self.checkequalnofix(0, 'aaa', 'count', u'b')
  102. self.checkequalnofix(3, u'aaa', 'count', 'a')
  103. self.checkequalnofix(0, u'aaa', 'count', 'b')
  104. self.checkequalnofix(0, u'aaa', 'count', 'b')
  105. self.checkequalnofix(1, u'aaa', 'count', 'a', -1)
  106. self.checkequalnofix(3, u'aaa', 'count', 'a', -10)
  107. self.checkequalnofix(2, u'aaa', 'count', 'a', 0, -1)
  108. self.checkequalnofix(0, u'aaa', 'count', 'a', 0, -10)
  109. def test_find(self):
  110. self.checkequalnofix(0, u'abcdefghiabc', 'find', u'abc')
  111. self.checkequalnofix(9, u'abcdefghiabc', 'find', u'abc', 1)
  112. self.checkequalnofix(-1, u'abcdefghiabc', 'find', u'def', 4)
  113. self.assertRaises(TypeError, u'hello'.find)
  114. self.assertRaises(TypeError, u'hello'.find, 42)
  115. def test_rfind(self):
  116. string_tests.CommonTest.test_rfind(self)
  117. # check mixed argument types
  118. self.checkequalnofix(9, 'abcdefghiabc', 'rfind', u'abc')
  119. self.checkequalnofix(12, 'abcdefghiabc', 'rfind', u'')
  120. self.checkequalnofix(12, u'abcdefghiabc', 'rfind', '')
  121. def test_index(self):
  122. string_tests.CommonTest.test_index(self)
  123. # check mixed argument types
  124. for (t1, t2) in ((str, unicode), (unicode, str)):
  125. self.checkequalnofix(0, t1('abcdefghiabc'), 'index', t2(''))
  126. self.checkequalnofix(3, t1('abcdefghiabc'), 'index', t2('def'))
  127. self.checkequalnofix(0, t1('abcdefghiabc'), 'index', t2('abc'))
  128. self.checkequalnofix(9, t1('abcdefghiabc'), 'index', t2('abc'), 1)
  129. self.assertRaises(ValueError, t1('abcdefghiabc').index, t2('hib'))
  130. self.assertRaises(ValueError, t1('abcdefghiab').index, t2('abc'), 1)
  131. self.assertRaises(ValueError, t1('abcdefghi').index, t2('ghi'), 8)
  132. self.assertRaises(ValueError, t1('abcdefghi').index, t2('ghi'), -1)
  133. def test_rindex(self):
  134. string_tests.CommonTest.test_rindex(self)
  135. # check mixed argument types
  136. for (t1, t2) in ((str, unicode), (unicode, str)):
  137. self.checkequalnofix(12, t1('abcdefghiabc'), 'rindex', t2(''))
  138. self.checkequalnofix(3, t1('abcdefghiabc'), 'rindex', t2('def'))
  139. self.checkequalnofix(9, t1('abcdefghiabc'), 'rindex', t2('abc'))
  140. self.checkequalnofix(0, t1('abcdefghiabc'), 'rindex', t2('abc'), 0, -1)
  141. self.assertRaises(ValueError, t1('abcdefghiabc').rindex, t2('hib'))
  142. self.assertRaises(ValueError, t1('defghiabc').rindex, t2('def'), 1)
  143. self.assertRaises(ValueError, t1('defghiabc').rindex, t2('abc'), 0, -1)
  144. self.assertRaises(ValueError, t1('abcdefghi').rindex, t2('ghi'), 0, 8)
  145. self.assertRaises(ValueError, t1('abcdefghi').rindex, t2('ghi'), 0, -1)
  146. def test_translate(self):
  147. self.checkequalnofix(u'bbbc', u'abababc', 'translate', {ord('a'):None})
  148. self.checkequalnofix(u'iiic', u'abababc', 'translate', {ord('a'):None, ord('b'):ord('i')})
  149. self.checkequalnofix(u'iiix', u'abababc', 'translate', {ord('a'):None, ord('b'):ord('i'), ord('c'):u'x'})
  150. self.checkequalnofix(u'<i><i><i>c', u'abababc', 'translate', {ord('a'):None, ord('b'):u'<i>'})
  151. self.checkequalnofix(u'c', u'abababc', 'translate', {ord('a'):None, ord('b'):u''})
  152. self.checkequalnofix(u'xyyx', u'xzx', 'translate', {ord('z'):u'yy'})
  153. self.assertRaises(TypeError, u'hello'.translate)
  154. self.assertRaises(TypeError, u'abababc'.translate, {ord('a'):''})
  155. def test_split(self):
  156. string_tests.CommonTest.test_split(self)
  157. # Mixed arguments
  158. self.checkequalnofix([u'a', u'b', u'c', u'd'], u'a//b//c//d', 'split', '//')
  159. self.checkequalnofix([u'a', u'b', u'c', u'd'], 'a//b//c//d', 'split', u'//')
  160. self.checkequalnofix([u'endcase ', u''], u'endcase test', 'split', 'test')
  161. def test_join(self):
  162. string_tests.MixinStrUnicodeUserStringTest.test_join(self)
  163. # mixed arguments
  164. self.checkequalnofix(u'a b c d', u' ', 'join', ['a', 'b', u'c', u'd'])
  165. self.checkequalnofix(u'abcd', u'', 'join', (u'a', u'b', u'c', u'd'))
  166. self.checkequalnofix(u'w x y z', u' ', 'join', string_tests.Sequence('wxyz'))
  167. self.checkequalnofix(u'a b c d', ' ', 'join', [u'a', u'b', u'c', u'd'])
  168. self.checkequalnofix(u'a b c d', ' ', 'join', ['a', 'b', u'c', u'd'])
  169. self.checkequalnofix(u'abcd', '', 'join', (u'a', u'b', u'c', u'd'))
  170. self.checkequalnofix(u'w x y z', ' ', 'join', string_tests.Sequence(u'wxyz'))
  171. def test_strip(self):
  172. string_tests.CommonTest.test_strip(self)
  173. self.assertRaises(UnicodeError, u"hello".strip, "\xff")
  174. def test_replace(self):
  175. string_tests.CommonTest.test_replace(self)
  176. # method call forwarded from str implementation because of unicode argument
  177. self.checkequalnofix(u'one@two!three!', 'one!two!three!', 'replace', u'!', u'@', 1)
  178. self.assertRaises(TypeError, 'replace'.replace, u"r", 42)
  179. def test_comparison(self):
  180. # Comparisons:
  181. self.assertTrue(u'abc' == 'abc')
  182. self.assertTrue('abc' == u'abc')
  183. self.assertTrue(u'abc' == u'abc')
  184. self.assertTrue(u'abcd' > 'abc')
  185. self.assertTrue('abcd' > u'abc')
  186. self.assertTrue(u'abcd' > u'abc')
  187. self.assertTrue(u'abc' < 'abcd')
  188. self.assertTrue('abc' < u'abcd')
  189. self.assertTrue(u'abc' < u'abcd')
  190. if 0:
  191. # Move these tests to a Unicode collation module test...
  192. # Testing UTF-16 code point order comparisons...
  193. # No surrogates, no fixup required.
  194. self.assertTrue(u'\u0061' < u'\u20ac')
  195. # Non surrogate below surrogate value, no fixup required
  196. self.assertTrue(u'\u0061' < u'\ud800\udc02')
  197. # Non surrogate above surrogate value, fixup required
  198. def test_lecmp(s, s2):
  199. self.assertTrue(s < s2)
  200. def test_fixup(s):
  201. s2 = u'\ud800\udc01'
  202. test_lecmp(s, s2)
  203. s2 = u'\ud900\udc01'
  204. test_lecmp(s, s2)
  205. s2 = u'\uda00\udc01'
  206. test_lecmp(s, s2)
  207. s2 = u'\udb00\udc01'
  208. test_lecmp(s, s2)
  209. s2 = u'\ud800\udd01'
  210. test_lecmp(s, s2)
  211. s2 = u'\ud900\udd01'
  212. test_lecmp(s, s2)
  213. s2 = u'\uda00\udd01'
  214. test_lecmp(s, s2)
  215. s2 = u'\udb00\udd01'
  216. test_lecmp(s, s2)
  217. s2 = u'\ud800\ude01'
  218. test_lecmp(s, s2)
  219. s2 = u'\ud900\ude01'
  220. test_lecmp(s, s2)
  221. s2 = u'\uda00\ude01'
  222. test_lecmp(s, s2)
  223. s2 = u'\udb00\ude01'
  224. test_lecmp(s, s2)
  225. s2 = u'\ud800\udfff'
  226. test_lecmp(s, s2)
  227. s2 = u'\ud900\udfff'
  228. test_lecmp(s, s2)
  229. s2 = u'\uda00\udfff'
  230. test_lecmp(s, s2)
  231. s2 = u'\udb00\udfff'
  232. test_lecmp(s, s2)
  233. test_fixup(u'\ue000')
  234. test_fixup(u'\uff61')
  235. # Surrogates on both sides, no fixup required
  236. self.assertTrue(u'\ud800\udc02' < u'\ud84d\udc56')
  237. def test_islower(self):
  238. string_tests.MixinStrUnicodeUserStringTest.test_islower(self)
  239. self.checkequalnofix(False, u'\u1FFc', 'islower')
  240. def test_isupper(self):
  241. string_tests.MixinStrUnicodeUserStringTest.test_isupper(self)
  242. if not sys.platform.startswith('java'):
  243. self.checkequalnofix(False, u'\u1FFc', 'isupper')
  244. def test_istitle(self):
  245. string_tests.MixinStrUnicodeUserStringTest.test_title(self)
  246. self.checkequalnofix(True, u'\u1FFc', 'istitle')
  247. self.checkequalnofix(True, u'Greek \u1FFcitlecases ...', 'istitle')
  248. def test_isspace(self):
  249. string_tests.MixinStrUnicodeUserStringTest.test_isspace(self)
  250. self.checkequalnofix(True, u'\u2000', 'isspace')
  251. self.checkequalnofix(True, u'\u200a', 'isspace')
  252. self.checkequalnofix(False, u'\u2014', 'isspace')
  253. def test_isalpha(self):
  254. string_tests.MixinStrUnicodeUserStringTest.test_isalpha(self)
  255. self.checkequalnofix(True, u'\u1FFc', 'isalpha')
  256. def test_isdecimal(self):
  257. self.checkequalnofix(False, u'', 'isdecimal')
  258. self.checkequalnofix(False, u'a', 'isdecimal')
  259. self.checkequalnofix(True, u'0', 'isdecimal')
  260. self.checkequalnofix(False, u'\u2460', 'isdecimal') # CIRCLED DIGIT ONE
  261. self.checkequalnofix(False, u'\xbc', 'isdecimal') # VULGAR FRACTION ONE QUARTER
  262. self.checkequalnofix(True, u'\u0660', 'isdecimal') # ARABIC-INDIC DIGIT ZERO
  263. self.checkequalnofix(True, u'0123456789', 'isdecimal')
  264. self.checkequalnofix(False, u'0123456789a', 'isdecimal')
  265. self.checkraises(TypeError, 'abc', 'isdecimal', 42)
  266. def test_isdigit(self):
  267. string_tests.MixinStrUnicodeUserStringTest.test_isdigit(self)
  268. self.checkequalnofix(True, u'\u2460', 'isdigit')
  269. self.checkequalnofix(False, u'\xbc', 'isdigit')
  270. self.checkequalnofix(True, u'\u0660', 'isdigit')
  271. def test_isnumeric(self):
  272. self.checkequalnofix(False, u'', 'isnumeric')
  273. self.checkequalnofix(False, u'a', 'isnumeric')
  274. self.checkequalnofix(True, u'0', 'isnumeric')
  275. self.checkequalnofix(True, u'\u2460', 'isnumeric')
  276. self.checkequalnofix(True, u'\xbc', 'isnumeric')
  277. self.checkequalnofix(True, u'\u0660', 'isnumeric')
  278. self.checkequalnofix(True, u'0123456789', 'isnumeric')
  279. self.checkequalnofix(False, u'0123456789a', 'isnumeric')
  280. self.assertRaises(TypeError, u"abc".isnumeric, 42)
  281. def test_contains(self):
  282. # Testing Unicode contains method
  283. self.assertIn('a', u'abdb')
  284. self.assertIn('a', u'bdab')
  285. self.assertIn('a', u'bdaba')
  286. self.assertIn('a', u'bdba')
  287. self.assertIn('a', u'bdba')
  288. self.assertIn(u'a', u'bdba')
  289. self.assertNotIn(u'a', u'bdb')
  290. self.assertNotIn(u'a', 'bdb')
  291. self.assertIn(u'a', 'bdba')
  292. self.assertIn(u'a', ('a',1,None))
  293. self.assertIn(u'a', (1,None,'a'))
  294. self.assertIn(u'a', (1,None,u'a'))
  295. self.assertIn('a', ('a',1,None))
  296. self.assertIn('a', (1,None,'a'))
  297. self.assertIn('a', (1,None,u'a'))
  298. self.assertNotIn('a', ('x',1,u'y'))
  299. self.assertNotIn('a', ('x',1,None))
  300. self.assertNotIn(u'abcd', u'abcxxxx')
  301. self.assertIn(u'ab', u'abcd')
  302. self.assertIn('ab', u'abc')
  303. self.assertIn(u'ab', 'abc')
  304. self.assertIn(u'ab', (1,None,u'ab'))
  305. self.assertIn(u'', u'abc')
  306. self.assertIn('', u'abc')
  307. # If the following fails either
  308. # the contains operator does not propagate UnicodeErrors or
  309. # someone has changed the default encoding
  310. self.assertRaises(UnicodeDecodeError, 'g\xe2teau'.__contains__, u'\xe2')
  311. self.assertRaises(UnicodeDecodeError, u'g\xe2teau'.__contains__, '\xe2')
  312. self.assertIn(u'', '')
  313. self.assertIn('', u'')
  314. self.assertIn(u'', u'')
  315. self.assertIn(u'', 'abc')
  316. self.assertIn('', u'abc')
  317. self.assertIn(u'', u'abc')
  318. self.assertNotIn(u'\0', 'abc')
  319. self.assertNotIn('\0', u'abc')
  320. self.assertNotIn(u'\0', u'abc')
  321. self.assertIn(u'\0', '\0abc')
  322. self.assertIn('\0', u'\0abc')
  323. self.assertIn(u'\0', u'\0abc')
  324. self.assertIn(u'\0', 'abc\0')
  325. self.assertIn('\0', u'abc\0')
  326. self.assertIn(u'\0', u'abc\0')
  327. self.assertIn(u'a', '\0abc')
  328. self.assertIn('a', u'\0abc')
  329. self.assertIn(u'a', u'\0abc')
  330. self.assertIn(u'asdf', 'asdf')
  331. self.assertIn('asdf', u'asdf')
  332. self.assertIn(u'asdf', u'asdf')
  333. self.assertNotIn(u'asdf', 'asd')
  334. self.assertNotIn('asdf', u'asd')
  335. self.assertNotIn(u'asdf', u'asd')
  336. self.assertNotIn(u'asdf', '')
  337. self.assertNotIn('asdf', u'')
  338. self.assertNotIn(u'asdf', u'')
  339. self.assertRaises(TypeError, u"abc".__contains__)
  340. self.assertRaises(TypeError, u"abc".__contains__, object())
  341. def test_formatting(self):
  342. string_tests.MixinStrUnicodeUserStringTest.test_formatting(self)
  343. # Testing Unicode formatting strings...
  344. self.assertEqual(u"%s, %s" % (u"abc", "abc"), u'abc, abc')
  345. self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", 1, 2, 3), u'abc, abc, 1, 2.000000, 3.00')
  346. self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", 1, -2, 3), u'abc, abc, 1, -2.000000, 3.00')
  347. self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 3.5), u'abc, abc, -1, -2.000000, 3.50')
  348. self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 3.57), u'abc, abc, -1, -2.000000, 3.57')
  349. self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 1003.57), u'abc, abc, -1, -2.000000, 1003.57')
  350. if not sys.platform.startswith('java'):
  351. self.assertEqual(u"%r, %r" % (u"abc", "abc"), u"u'abc', 'abc'")
  352. self.assertEqual(u"%(x)s, %(y)s" % {'x':u"abc", 'y':"def"}, u'abc, def')
  353. self.assertEqual(u"%(x)s, %(\xfc)s" % {'x':u"abc", u'\xfc':"def"}, u'abc, def')
  354. self.assertEqual(u'%c' % 0x1234, u'\u1234')
  355. self.assertRaises(OverflowError, u"%c".__mod__, (sys.maxunicode+1,))
  356. self.assertRaises(ValueError, u"%.1\u1032f".__mod__, (1.0/3))
  357. for num in range(0x00,0x80):
  358. char = chr(num)
  359. self.assertEqual(u"%c" % char, unicode(char))
  360. self.assertEqual(u"%c" % num, unicode(char))
  361. self.assertTrue(char == u"%c" % char)
  362. self.assertTrue(char == u"%c" % num)
  363. # Issue 7649
  364. for num in range(0x80,0x100):
  365. uchar = unichr(num)
  366. self.assertEqual(uchar, u"%c" % num) # works only with ints
  367. self.assertEqual(uchar, u"%c" % uchar) # and unicode chars
  368. # the implicit decoding should fail for non-ascii chars
  369. self.assertRaises(UnicodeDecodeError, u"%c".__mod__, chr(num))
  370. self.assertRaises(UnicodeDecodeError, u"%s".__mod__, chr(num))
  371. # formatting jobs delegated from the string implementation:
  372. self.assertEqual('...%(foo)s...' % {'foo':u"abc"}, u'...abc...')
  373. self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
  374. self.assertEqual('...%(foo)s...' % {u'foo':"abc"}, '...abc...')
  375. self.assertEqual('...%(foo)s...' % {u'foo':u"abc"}, u'...abc...')
  376. self.assertEqual('...%(foo)s...' % {u'foo':u"abc",'def':123}, u'...abc...')
  377. self.assertEqual('...%(foo)s...' % {u'foo':u"abc",u'def':123}, u'...abc...')
  378. self.assertEqual('...%s...%s...%s...%s...' % (1,2,3,u"abc"), u'...1...2...3...abc...')
  379. self.assertEqual('...%%...%%s...%s...%s...%s...%s...' % (1,2,3,u"abc"), u'...%...%s...1...2...3...abc...')
  380. self.assertEqual('...%s...' % u"abc", u'...abc...')
  381. self.assertEqual('%*s' % (5,u'abc',), u' abc')
  382. self.assertEqual('%*s' % (-5,u'abc',), u'abc ')
  383. self.assertEqual('%*.*s' % (5,2,u'abc',), u' ab')
  384. self.assertEqual('%*.*s' % (5,3,u'abc',), u' abc')
  385. self.assertEqual('%i %*.*s' % (10, 5,3,u'abc',), u'10 abc')
  386. self.assertEqual('%i%s %*.*s' % (10, 3, 5, 3, u'abc',), u'103 abc')
  387. self.assertEqual('%c' % u'a', u'a')
  388. class Wrapper:
  389. def __str__(self):
  390. return u'\u1234'
  391. self.assertEqual('%s' % Wrapper(), u'\u1234')
  392. def test_startswith_endswith_errors(self):
  393. for meth in (u'foo'.startswith, u'foo'.endswith):
  394. with self.assertRaises(UnicodeDecodeError):
  395. meth('\xff')
  396. with self.assertRaises(TypeError) as cm:
  397. meth(['f'])
  398. if test_support.check_impl_detail():
  399. exc = str(cm.exception)
  400. self.assertIn('unicode', exc)
  401. self.assertIn('str', exc)
  402. self.assertIn('tuple', exc)
  403. @test_support.run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
  404. def test_format_float(self):
  405. # should not format with a comma, but always with C locale
  406. self.assertEqual(u'1.0', u'%.1f' % 1.0)
  407. def test_constructor(self):
  408. # unicode(obj) tests (this maps to PyObject_Unicode() at C level)
  409. self.assertEqual(
  410. unicode(u'unicode remains unicode'),
  411. u'unicode remains unicode'
  412. )
  413. class UnicodeSubclass(unicode):
  414. pass
  415. self.assertEqual(
  416. unicode(UnicodeSubclass('unicode subclass becomes unicode')),
  417. u'unicode subclass becomes unicode'
  418. )
  419. self.assertEqual(
  420. unicode('strings are converted to unicode'),
  421. u'strings are converted to unicode'
  422. )
  423. class UnicodeCompat:
  424. def __init__(self, x):
  425. self.x = x
  426. def __unicode__(self):
  427. return self.x
  428. self.assertEqual(
  429. unicode(UnicodeCompat('__unicode__ compatible objects are recognized')),
  430. u'__unicode__ compatible objects are recognized')
  431. class StringCompat:
  432. def __init__(self, x):
  433. self.x = x
  434. def __str__(self):
  435. return self.x
  436. self.assertEqual(
  437. unicode(StringCompat('__str__ compatible objects are recognized')),
  438. u'__str__ compatible objects are recognized'
  439. )
  440. # unicode(obj) is compatible to str():
  441. o = StringCompat('unicode(obj) is compatible to str()')
  442. self.assertEqual(unicode(o), u'unicode(obj) is compatible to str()')
  443. self.assertEqual(str(o), 'unicode(obj) is compatible to str()')
  444. # %-formatting and .__unicode__()
  445. self.assertEqual(u'%s' %
  446. UnicodeCompat(u"u'%s' % obj uses obj.__unicode__()"),
  447. u"u'%s' % obj uses obj.__unicode__()")
  448. self.assertEqual(u'%s' %
  449. UnicodeCompat(u"u'%s' % obj falls back to obj.__str__()"),
  450. u"u'%s' % obj falls back to obj.__str__()")
  451. for obj in (123, 123.45, 123L):
  452. self.assertEqual(unicode(obj), unicode(str(obj)))
  453. # unicode(obj, encoding, error) tests (this maps to
  454. # PyUnicode_FromEncodedObject() at C level)
  455. if not sys.platform.startswith('java'):
  456. self.assertRaises(
  457. TypeError,
  458. unicode,
  459. u'decoding unicode is not supported',
  460. 'utf-8',
  461. 'strict'
  462. )
  463. self.assertEqual(
  464. unicode('strings are decoded to unicode', 'utf-8', 'strict'),
  465. u'strings are decoded to unicode'
  466. )
  467. if not sys.platform.startswith('java'):
  468. with test_support.check_py3k_warnings():
  469. buf = buffer('character buffers are decoded to unicode')
  470. self.assertEqual(
  471. unicode(
  472. buf,
  473. 'utf-8',
  474. 'strict'
  475. ),
  476. u'character buffers are decoded to unicode'
  477. )
  478. self.assertRaises(TypeError, unicode, 42, 42, 42)
  479. def test_codecs_utf7(self):
  480. utfTests = [
  481. (u'A\u2262\u0391.', 'A+ImIDkQ.'), # RFC2152 example
  482. (u'Hi Mom -\u263a-!', 'Hi Mom -+Jjo--!'), # RFC2152 example
  483. (u'\u65E5\u672C\u8A9E', '+ZeVnLIqe-'), # RFC2152 example
  484. (u'Item 3 is \u00a31.', 'Item 3 is +AKM-1.'), # RFC2152 example
  485. (u'+', '+-'),
  486. (u'+-', '+--'),
  487. (u'+?', '+-?'),
  488. (u'\?', '+AFw?'),
  489. (u'+?', '+-?'),
  490. (ur'\\?', '+AFwAXA?'),
  491. (ur'\\\?', '+AFwAXABc?'),
  492. (ur'++--', '+-+---'),
  493. (u'\U000abcde', '+2m/c3g-'), # surrogate pairs
  494. (u'/', '/'),
  495. ]
  496. for (x, y) in utfTests:
  497. self.assertEqual(x.encode('utf-7'), y)
  498. # Unpaired surrogates not supported
  499. self.assertRaises(UnicodeError, unicode, '+3ADYAA-', 'utf-7')
  500. self.assertEqual(unicode('+3ADYAA-', 'utf-7', 'replace'), u'\ufffd\ufffd')
  501. # Direct encoded characters
  502. set_d = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'(),-./:?"
  503. # Optional direct characters
  504. set_o = '!"#$%&*;<=>@[]^_`{|}'
  505. for c in set_d:
  506. self.assertEqual(c.encode('utf7'), c.encode('ascii'))
  507. self.assertEqual(c.encode('ascii').decode('utf7'), unicode(c))
  508. self.assertTrue(c == c.encode('ascii').decode('utf7'))
  509. for c in set_o:
  510. self.assertEqual(c.encode('ascii').decode('utf7'), unicode(c))
  511. self.assertTrue(c == c.encode('ascii').decode('utf7'))
  512. def test_codecs_utf8(self):
  513. self.assertEqual(u''.encode('utf-8'), '')
  514. self.assertEqual(u'\u20ac'.encode('utf-8'), '\xe2\x82\xac')
  515. self.assertEqual(u'\ud800\udc02'.encode('utf-8'), '\xf0\x90\x80\x82')
  516. self.assertEqual(u'\ud84d\udc56'.encode('utf-8'), '\xf0\xa3\x91\x96')
  517. self.assertEqual(u'\ud800'.encode('utf-8'), '\xed\xa0\x80')
  518. self.assertEqual(u'\udc00'.encode('utf-8'), '\xed\xb0\x80')
  519. self.assertEqual(
  520. (u'\ud800\udc02'*1000).encode('utf-8'),
  521. '\xf0\x90\x80\x82'*1000
  522. )
  523. self.assertEqual(
  524. u'\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
  525. u'\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
  526. u'\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
  527. u'\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
  528. u'\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das'
  529. u' Nunstuck git und'.encode('utf-8'),
  530. '\xe6\xad\xa3\xe7\xa2\xba\xe3\x81\xab\xe8\xa8\x80\xe3\x81'
  531. '\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3\xe3\x81\xaf\xe3'
  532. '\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe'
  533. '\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
  534. '\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8'
  535. '\xaa\x9e\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81'
  536. '\xe3\x81\x82\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81'
  537. '\x9f\xe3\x82\x89\xe3\x82\x81\xe3\x81\xa7\xe3\x81\x99\xe3'
  538. '\x80\x82\xe5\xae\x9f\xe9\x9a\x9b\xe3\x81\xab\xe3\x81\xaf'
  539. '\xe3\x80\x8cWenn ist das Nunstuck git und'
  540. )
  541. # UTF-8 specific decoding tests
  542. self.assertEqual(unicode('\xf0\xa3\x91\x96', 'utf-8'), u'\U00023456')
  543. self.assertEqual(unicode('\xf0\x90\x80\x82', 'utf-8'), u'\U00010002')
  544. self.assertEqual(unicode('\xe2\x82\xac', 'utf-8'), u'\u20ac')
  545. # Other possible utf-8 test cases:
  546. # * strict decoding testing for all of the
  547. # UTF8_ERROR cases in PyUnicode_DecodeUTF8
  548. def test_utf8_decode_valid_sequences(self):
  549. sequences = [
  550. # single byte
  551. ('\x00', u'\x00'), ('a', u'a'), ('\x7f', u'\x7f'),
  552. # 2 bytes
  553. ('\xc2\x80', u'\x80'), ('\xdf\xbf', u'\u07ff'),
  554. # 3 bytes
  555. ('\xe0\xa0\x80', u'\u0800'), ('\xed\x9f\xbf', u'\ud7ff'),
  556. ('\xee\x80\x80', u'\uE000'), ('\xef\xbf\xbf', u'\uffff'),
  557. # 4 bytes
  558. ('\xF0\x90\x80\x80', u'\U00010000'),
  559. ('\xf4\x8f\xbf\xbf', u'\U0010FFFF')
  560. ]
  561. for seq, res in sequences:
  562. self.assertEqual(seq.decode('utf-8'), res)
  563. for ch in map(unichr, range(0, sys.maxunicode)):
  564. self.assertEqual(ch, ch.encode('utf-8').decode('utf-8'))
  565. def test_utf8_decode_invalid_sequences(self):
  566. # continuation bytes in a sequence of 2, 3, or 4 bytes
  567. continuation_bytes = map(chr, range(0x80, 0xC0))
  568. # start bytes of a 2-byte sequence equivalent to codepoints < 0x7F
  569. invalid_2B_seq_start_bytes = map(chr, range(0xC0, 0xC2))
  570. # start bytes of a 4-byte sequence equivalent to codepoints > 0x10FFFF
  571. invalid_4B_seq_start_bytes = map(chr, range(0xF5, 0xF8))
  572. invalid_start_bytes = (
  573. continuation_bytes + invalid_2B_seq_start_bytes +
  574. invalid_4B_seq_start_bytes + map(chr, range(0xF7, 0x100))
  575. )
  576. for byte in invalid_start_bytes:
  577. self.assertRaises(UnicodeDecodeError, byte.decode, 'utf-8')
  578. for sb in invalid_2B_seq_start_bytes:
  579. for cb in continuation_bytes:
  580. self.assertRaises(UnicodeDecodeError, (sb+cb).decode, 'utf-8')
  581. for sb in invalid_4B_seq_start_bytes:
  582. for cb1 in continuation_bytes[:3]:
  583. for cb3 in continuation_bytes[:3]:
  584. self.assertRaises(UnicodeDecodeError,
  585. (sb+cb1+'\x80'+cb3).decode, 'utf-8')
  586. for cb in map(chr, range(0x80, 0xA0)):
  587. self.assertRaises(UnicodeDecodeError,
  588. ('\xE0'+cb+'\x80').decode, 'utf-8')
  589. self.assertRaises(UnicodeDecodeError,
  590. ('\xE0'+cb+'\xBF').decode, 'utf-8')
  591. # XXX: surrogates shouldn't be valid UTF-8!
  592. # see http://www.unicode.org/versions/Unicode5.2.0/ch03.pdf
  593. # (table 3-7) and http://www.rfc-editor.org/rfc/rfc3629.txt
  594. #for cb in map(chr, range(0xA0, 0xC0)):
  595. #self.assertRaises(UnicodeDecodeError,
  596. #('\xED'+cb+'\x80').decode, 'utf-8')
  597. #self.assertRaises(UnicodeDecodeError,
  598. #('\xED'+cb+'\xBF').decode, 'utf-8')
  599. # but since they are valid on Python 2 add a test for that:
  600. for cb, surrogate in zip(map(chr, range(0xA0, 0xC0)),
  601. map(unichr, range(0xd800, 0xe000, 64))):
  602. encoded = '\xED'+cb+'\x80'
  603. self.assertEqual(encoded.decode('utf-8'), surrogate)
  604. self.assertEqual(surrogate.encode('utf-8'), encoded)
  605. for cb in map(chr, range(0x80, 0x90)):
  606. self.assertRaises(UnicodeDecodeError,
  607. ('\xF0'+cb+'\x80\x80').decode, 'utf-8')
  608. self.assertRaises(UnicodeDecodeError,
  609. ('\xF0'+cb+'\xBF\xBF').decode, 'utf-8')
  610. for cb in map(chr, range(0x90, 0xC0)):
  611. self.assertRaises(UnicodeDecodeError,
  612. ('\xF4'+cb+'\x80\x80').decode, 'utf-8')
  613. self.assertRaises(UnicodeDecodeError,
  614. ('\xF4'+cb+'\xBF\xBF').decode, 'utf-8')
  615. def test_issue8271(self):
  616. # Issue #8271: during the decoding of an invalid UTF-8 byte sequence,
  617. # only the start byte and the continuation byte(s) are now considered
  618. # invalid, instead of the number of bytes specified by the start byte.
  619. # See http://www.unicode.org/versions/Unicode5.2.0/ch03.pdf (page 95,
  620. # table 3-8, Row 2) for more information about the algorithm used.
  621. FFFD = u'\ufffd'
  622. sequences = [
  623. # invalid start bytes
  624. ('\x80', FFFD), # continuation byte
  625. ('\x80\x80', FFFD*2), # 2 continuation bytes
  626. ('\xc0', FFFD),
  627. ('\xc0\xc0', FFFD*2),
  628. ('\xc1', FFFD),
  629. ('\xc1\xc0', FFFD*2),
  630. ('\xc0\xc1', FFFD*2),
  631. # with start byte of a 2-byte sequence
  632. ('\xc2', FFFD), # only the start byte
  633. ('\xc2\xc2', FFFD*2), # 2 start bytes
  634. ('\xc2\xc2\xc2', FFFD*3), # 2 start bytes
  635. ('\xc2\x41', FFFD+'A'), # invalid continuation byte
  636. # with start byte of a 3-byte sequence
  637. ('\xe1', FFFD), # only the start byte
  638. ('\xe1\xe1', FFFD*2), # 2 start bytes
  639. ('\xe1\xe1\xe1', FFFD*3), # 3 start bytes
  640. ('\xe1\xe1\xe1\xe1', FFFD*4), # 4 start bytes
  641. ('\xe1\x80', FFFD), # only 1 continuation byte
  642. ('\xe1\x41', FFFD+'A'), # invalid continuation byte
  643. ('\xe1\x41\x80', FFFD+'A'+FFFD), # invalid cb followed by valid cb
  644. ('\xe1\x41\x41', FFFD+'AA'), # 2 invalid continuation bytes
  645. ('\xe1\x80\x41', FFFD+'A'), # only 1 valid continuation byte
  646. ('\xe1\x80\xe1\x41', FFFD*2+'A'), # 1 valid and the other invalid
  647. ('\xe1\x41\xe1\x80', FFFD+'A'+FFFD), # 1 invalid and the other valid
  648. # with start byte of a 4-byte sequence
  649. ('\xf1', FFFD), # only the start byte
  650. ('\xf1\xf1', FFFD*2), # 2 start bytes
  651. ('\xf1\xf1\xf1', FFFD*3), # 3 start bytes
  652. ('\xf1\xf1\xf1\xf1', FFFD*4), # 4 start bytes
  653. ('\xf1\xf1\xf1\xf1\xf1', FFFD*5), # 5 start bytes
  654. ('\xf1\x80', FFFD), # only 1 continuation bytes
  655. ('\xf1\x80\x80', FFFD), # only 2 continuation bytes
  656. ('\xf1\x80\x41', FFFD+'A'), # 1 valid cb and 1 invalid
  657. ('\xf1\x80\x41\x41', FFFD+'AA'), # 1 valid cb and 1 invalid
  658. ('\xf1\x80\x80\x41', FFFD+'A'), # 2 valid cb and 1 invalid
  659. ('\xf1\x41\x80', FFFD+'A'+FFFD), # 1 invalid cv and 1 valid
  660. ('\xf1\x41\x80\x80', FFFD+'A'+FFFD*2), # 1 invalid cb and 2 invalid
  661. ('\xf1\x41\x80\x41', FFFD+'A'+FFFD+'A'), # 2 invalid cb and 1 invalid
  662. ('\xf1\x41\x41\x80', FFFD+'AA'+FFFD), # 1 valid cb and 1 invalid
  663. ('\xf1\x41\xf1\x80', FFFD+'A'+FFFD),
  664. ('\xf1\x41\x80\xf1', FFFD+'A'+FFFD*2),
  665. ('\xf1\xf1\x80\x41', FFFD*2+'A'),
  666. ('\xf1\x41\xf1\xf1', FFFD+'A'+FFFD*2),
  667. # with invalid start byte of a 4-byte sequence (rfc2279)
  668. ('\xf5', FFFD), # only the start byte
  669. ('\xf5\xf5', FFFD*2), # 2 start bytes
  670. ('\xf5\x80', FFFD*2), # only 1 continuation byte
  671. ('\xf5\x80\x80', FFFD*3), # only 2 continuation byte
  672. ('\xf5\x80\x80\x80', FFFD*4), # 3 continuation bytes
  673. ('\xf5\x80\x41', FFFD*2+'A'), # 1 valid cb and 1 invalid
  674. ('\xf5\x80\x41\xf5', FFFD*2+'A'+FFFD),
  675. ('\xf5\x41\x80\x80\x41', FFFD+'A'+FFFD*2+'A'),
  676. # with invalid start byte of a 5-byte sequence (rfc2279)
  677. ('\xf8', FFFD), # only the start byte
  678. ('\xf8\xf8', FFFD*2), # 2 start bytes
  679. ('\xf8\x80', FFFD*2), # only one continuation byte
  680. ('\xf8\x80\x41', FFFD*2 + 'A'), # 1 valid cb and 1 invalid
  681. ('\xf8\x80\x80\x80\x80', FFFD*5), # invalid 5 bytes seq with 5 bytes
  682. # with invalid start byte of a 6-byte sequence (rfc2279)
  683. ('\xfc', FFFD), # only the start byte
  684. ('\xfc\xfc', FFFD*2), # 2 start bytes
  685. ('\xfc\x80\x80', FFFD*3), # only 2 continuation bytes
  686. ('\xfc\x80\x80\x80\x80\x80', FFFD*6), # 6 continuation bytes
  687. # invalid start byte
  688. ('\xfe', FFFD),
  689. ('\xfe\x80\x80', FFFD*3),
  690. # other sequences
  691. ('\xf1\x80\x41\x42\x43', u'\ufffd\x41\x42\x43'),
  692. ('\xf1\x80\xff\x42\x43', u'\ufffd\ufffd\x42\x43'),
  693. ('\xf1\x80\xc2\x81\x43', u'\ufffd\x81\x43'),
  694. ('\x61\xF1\x80\x80\xE1\x80\xC2\x62\x80\x63\x80\xBF\x64',
  695. u'\x61\uFFFD\uFFFD\uFFFD\x62\uFFFD\x63\uFFFD\uFFFD\x64'),
  696. ]
  697. for n, (seq, res) in enumerate(sequences):
  698. self.assertRaises(UnicodeDecodeError, seq.decode, 'utf-8', 'strict')
  699. self.assertEqual(seq.decode('utf-8', 'replace'), res)
  700. self.assertEqual((seq+'b').decode('utf-8', 'replace'), res+'b')
  701. self.assertEqual(seq.decode('utf-8', 'ignore'),
  702. res.replace(u'\uFFFD', ''))
  703. def test_codecs_idna(self):
  704. # Test whether trailing dot is preserved
  705. self.assertEqual(u"www.python.org.".encode("idna"), "www.python.org.")
  706. def test_codecs_errors(self):
  707. # Error handling (encoding)
  708. self.assertRaises(UnicodeError, u'Andr\202 x'.encode, 'ascii')
  709. self.assertRaises(UnicodeError, u'Andr\202 x'.encode, 'ascii','strict')
  710. self.assertEqual(u'Andr\202 x'.encode('ascii','ignore'), "Andr x")
  711. self.assertEqual(u'Andr\202 x'.encode('ascii','replace'), "Andr? x")
  712. self.assertEqual(u'Andr\202 x'.encode('ascii', 'replace'),
  713. u'Andr\202 x'.encode('ascii', errors='replace'))
  714. self.assertEqual(u'Andr\202 x'.encode('ascii', 'ignore'),
  715. u'Andr\202 x'.encode(encoding='ascii', errors='ignore'))
  716. # Error handling (decoding)
  717. self.assertRaises(UnicodeError, unicode, 'Andr\202 x', 'ascii')
  718. self.assertRaises(UnicodeError, unicode, 'Andr\202 x', 'ascii','strict')
  719. self.assertEqual(unicode('Andr\202 x','ascii','ignore'), u"Andr x")
  720. self.assertEqual(unicode('Andr\202 x','ascii','replace'), u'Andr\uFFFD x')
  721. self.assertEqual(u'abcde'.decode('ascii', 'ignore'),
  722. u'abcde'.decode('ascii', errors='ignore'))
  723. self.assertEqual(u'abcde'.decode('ascii', 'replace'),
  724. u'abcde'.decode(encoding='ascii', errors='replace'))
  725. # Error handling (unknown character names)
  726. self.assertEqual("\\N{foo}xx".decode("unicode-escape", "ignore"), u"xx")
  727. # Error handling (truncated escape sequence)
  728. self.assertRaises(UnicodeError, "\\".decode, "unicode-escape")
  729. self.assertRaises(TypeError, "hello".decode, "test.unicode1")
  730. self.assertRaises(TypeError, unicode, "hello", "test.unicode2")
  731. self.assertRaises(TypeError, u"hello".encode, "test.unicode1")
  732. self.assertRaises(TypeError, u"hello".encode, "test.unicode2")
  733. # executes PyUnicode_Encode()
  734. import imp
  735. self.assertRaises(
  736. ImportError,
  737. imp.find_module,
  738. "non-existing module",
  739. [u"non-existing dir"]
  740. )
  741. # Error handling (wrong arguments)
  742. self.assertRaises(TypeError, u"hello".encode, 42, 42, 42)
  743. # Error handling (PyUnicode_EncodeDecimal())
  744. self.assertRaises(UnicodeError, int, u"\u0200")
  745. def test_codecs(self):
  746. # Encoding
  747. self.assertEqual(u'hello'.encode('ascii'), 'hello')
  748. self.assertEqual(u'hello'.encode('utf-7'), 'hello')
  749. self.assertEqual(u'hello'.encode('utf-8'), 'hello')
  750. self.assertEqual(u'hello'.encode('utf8'), 'hello')
  751. self.assertEqual(u'hello'.encode('utf-16-le'), 'h\000e\000l\000l\000o\000')
  752. self.assertEqual(u'hello'.encode('utf-16-be'), '\000h\000e\000l\000l\000o')
  753. self.assertEqual(u'hello'.encode('latin-1'), 'hello')
  754. # Roundtrip safety for BMP (just the first 1024 chars)
  755. for c in xrange(1024):
  756. u = unichr(c)
  757. for encoding in ('utf-7', 'utf-8', 'utf-16', 'utf-16-le',
  758. 'utf-16-be', 'raw_unicode_escape',
  759. 'unicode_escape', 'unicode_internal'):
  760. self.assertEqual(unicode(u.encode(encoding),encoding), u)
  761. # Roundtrip safety for BMP (just the first 256 chars)
  762. for c in xrange(256):
  763. u = unichr(c)
  764. for encoding in ('latin-1',):
  765. self.assertEqual(unicode(u.encode(encoding),encoding), u)
  766. # Roundtrip safety for BMP (just the first 128 chars)
  767. for c in xrange(128):
  768. u = unichr(c)
  769. for encoding in ('ascii',):
  770. self.assertEqual(unicode(u.encode(encoding),encoding), u)
  771. # Roundtrip safety for non-BMP (just a few chars)
  772. u = u'\U00010001\U00020002\U00030003\U00040004\U00050005'
  773. for encoding in ('utf-8', 'utf-16', 'utf-16-le', 'utf-16-be',
  774. #'raw_unicode_escape',
  775. 'unicode_escape', 'unicode_internal'):
  776. self.assertEqual(unicode(u.encode(encoding),encoding), u)
  777. # UTF-8 must be roundtrip safe for all UCS-2 code points
  778. # This excludes surrogates: in the full range, there would be
  779. # a surrogate pair (\udbff\udc00), which gets converted back
  780. # to a non-BMP character (\U0010fc00)
  781. u = u''.join(map(unichr, range(0,0xd800)+range(0xe000,0x10000)))
  782. for encoding in ('utf-8',):
  783. self.assertEqual(unicode(u.encode(encoding),encoding), u)
  784. def test_codecs_charmap(self):
  785. # 0-127
  786. s = ''.join(map(chr, xrange(128)))
  787. for encoding in (
  788. 'cp037', 'cp1026',
  789. 'cp437', 'cp500', 'cp720', 'cp737', 'cp775', 'cp850',
  790. 'cp852', 'cp855', 'cp858', 'cp860', 'cp861', 'cp862',
  791. 'cp863', 'cp865', 'cp866',
  792. 'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
  793. 'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
  794. 'iso8859_7', 'iso8859_9', 'koi8_r', 'latin_1',
  795. 'mac_cyrillic', 'mac_latin2',
  796. 'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
  797. 'cp1256', 'cp1257', 'cp1258',
  798. 'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
  799. 'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
  800. 'cp1006', 'iso8859_8',
  801. ### These have undefined mappings:
  802. #'cp424',
  803. ### These fail the round-trip:
  804. #'cp875'
  805. ):
  806. self.assertEqual(unicode(s, encoding).encode(encoding), s)
  807. # 128-255
  808. s = ''.join(map(chr, xrange(128, 256)))
  809. for encoding in (
  810. 'cp037', 'cp1026',
  811. 'cp437', 'cp500', 'cp720', 'cp737', 'cp775', 'cp850',
  812. 'cp852', 'cp855', 'cp858', 'cp860', 'cp861', 'cp862',
  813. 'cp863', 'cp865', 'cp866',
  814. 'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
  815. 'iso8859_2', 'iso8859_4', 'iso8859_5',
  816. 'iso8859_9', 'koi8_r', 'latin_1',
  817. 'mac_cyrillic', 'mac_latin2',
  818. ### These have undefined mappings:
  819. #'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
  820. #'cp1256', 'cp1257', 'cp1258',
  821. #'cp424', 'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
  822. #'iso8859_3', 'iso8859_6', 'iso8859_7',
  823. #'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
  824. ### These fail the round-trip:
  825. #'cp1006', 'cp875', 'iso8859_8',
  826. ):
  827. self.assertEqual(unicode(s, encoding).encode(encoding), s)
  828. def test_concatenation(self):
  829. self.assertEqual((u"abc" u"def"), u"abcdef")
  830. self.assertEqual(("abc" u"def"), u"abcdef")
  831. self.assertEqual((u"abc" "def"), u"abcdef")
  832. self.assertEqual((u"abc" u"def" "ghi"), u"abcdefghi")
  833. self.assertEqual(("abc" "def" u"ghi"), u"abcdefghi")
  834. def test_printing(self):
  835. class BitBucket:
  836. def write(self, text):
  837. pass
  838. out = BitBucket()
  839. print >>out, u'abc'
  840. print >>out, u'abc', u'def'
  841. print >>out, u'abc', 'def'
  842. print >>out, 'abc', u'def'
  843. print >>out, u'abc\n'
  844. print >>out, u'abc\n',
  845. print >>out, u'abc\n',
  846. print >>out, u'def\n'
  847. print >>out, u'def\n'
  848. def test_ucs4(self):
  849. x = u'\U00100000'
  850. y = x.encode("raw-unicode-escape").decode("raw-unicode-escape")
  851. self.assertEqual(x, y)
  852. y = r'\U00100000'
  853. x = y.decode("raw-unicode-escape").encode("raw-unicode-escape")
  854. self.assertEqual(x, y)
  855. y = r'\U00010000'
  856. x = y.decode("raw-unicode-escape").encode("raw-unicode-escape")
  857. self.assertEqual(x, y)
  858. try:
  859. '\U11111111'.decode("raw-unicode-escape")
  860. except UnicodeDecodeError as e:
  861. self.assertEqual(e.start, 0)
  862. self.assertEqual(e.end, 10)
  863. else:
  864. self.fail("Should have raised UnicodeDecodeError")
  865. def test_conversion(self):
  866. # Make sure __unicode__() works properly
  867. class Foo0:
  868. def __str__(self):
  869. return "foo"
  870. class Foo1:
  871. def __unicode__(self):
  872. return u"foo"
  873. class Foo2(object):
  874. def __unicode__(self):
  875. return u"foo"
  876. class Foo3(object):
  877. def __unicode__(self):
  878. return "foo"
  879. class Foo4(str):
  880. def __unicode__(self):
  881. return "foo"
  882. class Foo5(unicode):
  883. def __unicode__(self):
  884. return "foo"
  885. class Foo6(str):
  886. def __str__(self):
  887. return "foos"
  888. def __unicode__(self):
  889. return u"foou"
  890. class Foo7(unicode):
  891. def __str__(self):
  892. return "foos"
  893. def __unicode__(self):
  894. return u"foou"
  895. class Foo8(unicode):
  896. def __new__(cls, content=""):
  897. return unicode.__new__(cls, 2*content)
  898. def __unicode__(self):
  899. return self
  900. class Foo9(unicode):
  901. def __str__(self):
  902. return "string"
  903. def __unicode__(self):
  904. return "not unicode"
  905. self.assertEqual(unicode(Foo0()), u"foo")
  906. self.assertEqual(unicode(Foo1()), u"foo")
  907. self.assertEqual(unicode(Foo2()), u"foo")
  908. self.assertEqual(unicode(Foo3()), u"foo")
  909. self.assertEqual(unicode(Foo4("bar")), u"foo")
  910. self.assertEqual(unicode(Foo5("bar")), u"foo")
  911. self.assertEqual(unicode(Foo6("bar")), u"foou")
  912. self.assertEqual(unicode(Foo7("bar")), u"foou")
  913. self.assertEqual(unicode(Foo8("foo")), u"foofoo")
  914. self.assertEqual(str(Foo9("foo")), "string")
  915. self.assertEqual(unicode(Foo9("foo")), u"not unicode")
  916. def test_unicode_repr(self):
  917. class s1:
  918. def __repr__(self):
  919. return '\\n'
  920. class s2:
  921. def __repr__(self):
  922. return u'\\n'
  923. self.assertEqual(repr(s1()), '\\n')
  924. self.assertEqual(repr(s2()), '\\n')
  925. def test_expandtabs_overflows_gracefully(self):
  926. # This test only affects 32-bit platforms because expandtabs can only take
  927. # an int as the max value, not a 64-bit C long. If expandtabs is changed
  928. # to take a 64-bit long, this test should apply to all platforms.
  929. if sys.maxint > (1 << 32) or struct.calcsize('P') != 4:
  930. return
  931. self.assertRaises((OverflowError, MemoryError),
  932. u't\tt\t'.expandtabs, sys.maxint)
  933. def test__format__(self):
  934. def test(value, format, expected):
  935. # test both with and without the trailing 's'
  936. self.assertEqual(value.__format__(format), expected)
  937. self.assertEqual(value.__format__(format + u's'), expected)
  938. test(u'', u'', u'')
  939. test(u'abc', u'', u'abc')
  940. test(u'abc', u'.3', u'abc')
  941. test(u'ab', u'.3', u'ab')
  942. test(u'abcdef', u'.3', u'abc')
  943. test(u'abcdef', u'.0', u'')
  944. test(u'abc', u'3.3', u'abc')
  945. test(u'abc', u'2.3', u'abc')
  946. test(u'abc', u'2.2', u'ab')
  947. test(u'abc', u'3.2', u'ab ')
  948. test(u'result', u'x<0', u'result')
  949. test(u'result', u'x<5', u'result')
  950. test(u'result', u'x<6', u'result')
  951. test(u'result', u'x<7', u'resultx')
  952. test(u'result', u'x<8', u'resultxx')
  953. test(u'result', u' <7', u'result ')
  954. test(u'result', u'<7', u'result ')
  955. test(u'result', u'>7', u' result')
  956. test(u'result', u'>8', u' result')
  957. test(u'result', u'^8', u' result ')
  958. test(u'result', u'^9', u' result ')
  959. test(u'result', u'^10', u' result ')
  960. test(u'a', u'10000', u'a' + u' ' * 9999)
  961. test(u'', u'10000', u' ' * 10000)
  962. test(u'', u'10000000', u' ' * 10000000)
  963. # test mixing unicode and str
  964. self.assertEqual(u'abc'.__format__('s'), u'abc')
  965. self.assertEqual(u'abc'.__format__('->10s'), u'-------abc')
  966. def test_format(self):
  967. self.assertEqual(u''.format(), u'')
  968. self.assertEqual(u'a'.format(), u'a')
  969. self.assertEqual(u'ab'.format(), u'ab')
  970. self.assertEqual(u'a{{'.format(), u'a{')
  971. self.assertEqual(u'a}}'.format(), u'a}')
  972. self.assertEqual(u'{{b'.format(), u'{b')
  973. self.assertEqual(u'}}b'.format(), u'}b')
  974. self.assertEqual(u'a{{b'.format(), u'a{b')
  975. # examples from the PEP:
  976. import datetime
  977. self.assertEqual(u"My name is {0}".format(u'Fred'), u"My name is Fred")
  978. self.assertEqual(u"My name is {0[name]}".format(dict(name=u'Fred')),
  979. u"My name is Fred")
  980. self.assertEqual(u"My name is {0} :-{{}}".format(u'Fred'),
  981. u"My name is Fred :-{}")
  991. # datetime.__format__ doesn't work with uni…

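As a closing illustration distilled from test_issue8271 in the listing above (not part of the file itself), the byte-by-byte replacement semantics that the test asserts can be checked directly under a Python 2.7 interpreter that implements the issue #8271 behaviour:

    # Invalid UTF-8 is replaced per offending byte, not per declared
    # sequence length (issue #8271): the truncated start sequence collapses
    # to a single U+FFFD and the following ASCII bytes decode normally.
    assert '\xe1\x41'.decode('utf-8', 'replace') == u'\ufffd' + u'A'
    assert '\xf1\x80\x41\x42\x43'.decode('utf-8', 'replace') == u'\ufffdABC'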