PageRenderTime 64ms CodeModel.GetById 33ms RepoModel.GetById 0ms app.codeStats 0ms

/pandas/io/tests/test_packers.py

http://github.com/wesm/pandas
Python | 888 lines | 672 code | 168 blank | 48 comment | 83 complexity | 81055d3e95c81b0143b682f48d5e412a MD5 | raw file
Possible License(s): BSD-3-Clause, Apache-2.0
  1. import nose
  2. import os
  3. import datetime
  4. import numpy as np
  5. import sys
  6. from distutils.version import LooseVersion
  7. from pandas import compat
  8. from pandas.compat import u, PY3
  9. from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
  10. date_range, period_range, Index, Categorical)
  11. from pandas.core.common import PerformanceWarning
  12. from pandas.io.packers import to_msgpack, read_msgpack
  13. import pandas.util.testing as tm
  14. from pandas.util.testing import (ensure_clean,
  15. assert_categorical_equal,
  16. assert_frame_equal,
  17. assert_index_equal,
  18. assert_series_equal,
  19. patch)
  20. from pandas.tests.test_panel import assert_panel_equal
  21. import pandas
  22. from pandas import Timestamp, NaT, tslib
  23. nan = np.nan
  24. try:
  25. import blosc # NOQA
  26. except ImportError:
  27. _BLOSC_INSTALLED = False
  28. else:
  29. _BLOSC_INSTALLED = True
  30. try:
  31. import zlib # NOQA
  32. except ImportError:
  33. _ZLIB_INSTALLED = False
  34. else:
  35. _ZLIB_INSTALLED = True
  36. _multiprocess_can_split_ = False
def check_arbitrary(a, b):
    """Assert that ``a`` and ``b`` are equivalent, dispatching on the type
    of ``a`` to the matching pandas comparison helper.

    Lists/tuples are compared element-wise (recursively); NaT is checked
    by identity; Timestamps additionally compare ``freq``.  Anything not
    matched falls through to plain ``==``.
    """
    if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
        assert(len(a) == len(b))
        for a_, b_ in zip(a, b):
            check_arbitrary(a_, b_)
    elif isinstance(a, Panel):
        assert_panel_equal(a, b)
    elif isinstance(a, DataFrame):
        assert_frame_equal(a, b)
    elif isinstance(a, Series):
        assert_series_equal(a, b)
    elif isinstance(a, Index):
        assert_index_equal(a, b)
    elif isinstance(a, Categorical):
        # Temp,
        # Categorical.categories is changed from str to bytes in PY3
        # maybe the same as GH 13591
        if PY3 and b.categories.inferred_type == 'string':
            pass
        else:
            tm.assert_categorical_equal(a, b)
    elif a is NaT:
        # NaT must round-trip to the singleton, not merely an equal value
        assert b is NaT
    elif isinstance(a, Timestamp):
        assert a == b
        assert a.freq == b.freq
    else:
        # scalars and anything unmatched above
        assert(a == b)
class TestPackers(tm.TestCase):
    """Base class providing a msgpack file round-trip helper for subclasses."""

    def setUp(self):
        # unique scratch file name per test to avoid collisions
        self.path = '__%s__.msg' % tm.rands(10)

    def tearDown(self):
        pass

    def encode_decode(self, x, compress=None, **kwargs):
        """Write ``x`` to a temporary msgpack file and read it back.

        ``kwargs`` are forwarded to both ``to_msgpack`` and ``read_msgpack``.
        The temp file is removed by the ``ensure_clean`` context manager.
        """
        with ensure_clean(self.path) as p:
            to_msgpack(p, x, compress=compress, **kwargs)
            return read_msgpack(p, **kwargs)
  74. class TestAPI(TestPackers):
  75. def test_string_io(self):
  76. df = DataFrame(np.random.randn(10, 2))
  77. s = df.to_msgpack(None)
  78. result = read_msgpack(s)
  79. tm.assert_frame_equal(result, df)
  80. s = df.to_msgpack()
  81. result = read_msgpack(s)
  82. tm.assert_frame_equal(result, df)
  83. s = df.to_msgpack()
  84. result = read_msgpack(compat.BytesIO(s))
  85. tm.assert_frame_equal(result, df)
  86. s = to_msgpack(None, df)
  87. result = read_msgpack(s)
  88. tm.assert_frame_equal(result, df)
  89. with ensure_clean(self.path) as p:
  90. s = df.to_msgpack()
  91. fh = open(p, 'wb')
  92. fh.write(s)
  93. fh.close()
  94. result = read_msgpack(p)
  95. tm.assert_frame_equal(result, df)
  96. def test_iterator_with_string_io(self):
  97. dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
  98. s = to_msgpack(None, *dfs)
  99. for i, result in enumerate(read_msgpack(s, iterator=True)):
  100. tm.assert_frame_equal(result, dfs[i])
  101. def test_invalid_arg(self):
  102. # GH10369
  103. class A(object):
  104. def __init__(self):
  105. self.read = 0
  106. tm.assertRaises(ValueError, read_msgpack, path_or_buf=None)
  107. tm.assertRaises(ValueError, read_msgpack, path_or_buf={})
  108. tm.assertRaises(ValueError, read_msgpack, path_or_buf=A())
class TestNumpy(TestPackers):
    """Round-trip plain Python and numpy scalars, sequences and arrays."""

    def test_numpy_scalar_float(self):
        x = np.float32(np.random.rand())
        x_rec = self.encode_decode(x)
        tm.assert_almost_equal(x, x_rec)

    def test_numpy_scalar_complex(self):
        x = np.complex64(np.random.rand() + 1j * np.random.rand())
        x_rec = self.encode_decode(x)
        self.assertTrue(np.allclose(x, x_rec))

    def test_scalar_float(self):
        x = np.random.rand()
        x_rec = self.encode_decode(x)
        tm.assert_almost_equal(x, x_rec)

    def test_scalar_complex(self):
        x = np.random.rand() + 1j * np.random.rand()
        x_rec = self.encode_decode(x)
        self.assertTrue(np.allclose(x, x_rec))

    def test_list_numpy_float(self):
        x = [np.float32(np.random.rand()) for i in range(5)]
        x_rec = self.encode_decode(x)
        # current msgpack cannot distinguish list/tuple
        tm.assert_almost_equal(tuple(x), x_rec)

        x_rec = self.encode_decode(tuple(x))
        tm.assert_almost_equal(tuple(x), x_rec)

    def test_list_numpy_float_complex(self):
        if not hasattr(np, 'complex128'):
            raise nose.SkipTest('numpy cant handle complex128')

        x = [np.float32(np.random.rand()) for i in range(5)] + \
            [np.complex128(np.random.rand() + 1j * np.random.rand())
             for i in range(5)]
        x_rec = self.encode_decode(x)
        self.assertTrue(np.allclose(x, x_rec))

    def test_list_float(self):
        x = [np.random.rand() for i in range(5)]
        x_rec = self.encode_decode(x)
        # current msgpack cannot distinguish list/tuple
        tm.assert_almost_equal(tuple(x), x_rec)

        x_rec = self.encode_decode(tuple(x))
        tm.assert_almost_equal(tuple(x), x_rec)

    def test_list_float_complex(self):
        x = [np.random.rand() for i in range(5)] + \
            [(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
        x_rec = self.encode_decode(x)
        self.assertTrue(np.allclose(x, x_rec))

    def test_dict_float(self):
        x = {'foo': 1.0, 'bar': 2.0}
        x_rec = self.encode_decode(x)
        tm.assert_almost_equal(x, x_rec)

    def test_dict_complex(self):
        x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
        x_rec = self.encode_decode(x)
        self.assertEqual(x, x_rec)
        for key in x:
            # the complex type itself must survive, not just the value
            self.assertEqual(type(x[key]), type(x_rec[key]))

    def test_dict_numpy_float(self):
        x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
        x_rec = self.encode_decode(x)
        tm.assert_almost_equal(x, x_rec)

    def test_dict_numpy_complex(self):
        x = {'foo': np.complex128(1.0 + 1.0j),
             'bar': np.complex128(2.0 + 2.0j)}
        x_rec = self.encode_decode(x)
        self.assertEqual(x, x_rec)
        for key in x:
            self.assertEqual(type(x[key]), type(x_rec[key]))

    def test_numpy_array_float(self):
        # run multiple times
        for n in range(10):
            x = np.random.rand(10)
            for dtype in ['float32', 'float64']:
                x = x.astype(dtype)
                x_rec = self.encode_decode(x)
                tm.assert_almost_equal(x, x_rec)

    def test_numpy_array_complex(self):
        x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
        x_rec = self.encode_decode(x)
        # elementwise equality AND dtype preservation
        self.assertTrue(all(map(lambda x, y: x == y, x, x_rec)) and
                        x.dtype == x_rec.dtype)

    def test_list_mixed(self):
        x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
        x_rec = self.encode_decode(x)
        # current msgpack cannot distinguish list/tuple
        tm.assert_almost_equal(tuple(x), x_rec)

        x_rec = self.encode_decode(tuple(x))
        tm.assert_almost_equal(tuple(x), x_rec)
class TestBasic(TestPackers):
    """Round-trip scalar date/time-like values (Timestamp, NaT, datetimes)."""

    def test_timestamp(self):
        for i in [Timestamp(
                '20130101'), Timestamp('20130101', tz='US/Eastern'),
                Timestamp('201301010501')]:
            i_rec = self.encode_decode(i)
            self.assertEqual(i, i_rec)

    def test_nat(self):
        # NaT must come back as the singleton, not merely an equal value
        nat_rec = self.encode_decode(NaT)
        self.assertIs(NaT, nat_rec)

    def test_datetimes(self):

        # fails under 2.6/win32 (np.datetime64 seems broken)

        if LooseVersion(sys.version) < '2.7':
            raise nose.SkipTest('2.6 with np.datetime64 is broken')

        for i in [datetime.datetime(2013, 1, 1),
                  datetime.datetime(2013, 1, 1, 5, 1),
                  datetime.date(2013, 1, 1),
                  np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
            i_rec = self.encode_decode(i)
            self.assertEqual(i, i_rec)

    def test_timedeltas(self):

        for i in [datetime.timedelta(days=1),
                  datetime.timedelta(days=1, seconds=10),
                  np.timedelta64(1000000)]:
            i_rec = self.encode_decode(i)
            self.assertEqual(i, i_rec)
class TestIndex(TestPackers):
    """Round-trip the various Index flavours, including MultiIndex."""

    def setUp(self):
        super(TestIndex, self).setUp()

        # one representative of each plain Index type
        self.d = {
            'string': tm.makeStringIndex(100),
            'date': tm.makeDateIndex(100),
            'int': tm.makeIntIndex(100),
            'rng': tm.makeRangeIndex(100),
            'float': tm.makeFloatIndex(100),
            'empty': Index([]),
            'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
            'period': Index(period_range('2012-1-1', freq='M', periods=3)),
            'date2': Index(date_range('2013-01-1', periods=10)),
            'bdate': Index(bdate_range('2013-01-02', periods=10)),
        }

        self.mi = {
            'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
                                           ('foo', 'two'),
                                           ('qux', 'one'), ('qux', 'two')],
                                          names=['first', 'second']),
        }

    def test_basic_index(self):

        for s, i in self.d.items():
            i_rec = self.encode_decode(i)
            self.assert_index_equal(i, i_rec)

        # datetime with no freq (GH5506)
        i = Index([Timestamp('20130101'), Timestamp('20130103')])
        i_rec = self.encode_decode(i)
        self.assert_index_equal(i, i_rec)

        # datetime with timezone
        i = Index([Timestamp('20130101 9:00:00'), Timestamp(
            '20130103 11:00:00')]).tz_localize('US/Eastern')
        i_rec = self.encode_decode(i)
        self.assert_index_equal(i, i_rec)

    def test_multi_index(self):

        for s, i in self.mi.items():
            i_rec = self.encode_decode(i)
            self.assert_index_equal(i, i_rec)

    def test_unicode(self):
        i = tm.makeUnicodeIndex(100)

        i_rec = self.encode_decode(i)
        self.assert_index_equal(i, i_rec)
class TestSeries(TestPackers):
    """Round-trip Series of assorted dtypes, including tz-aware datetimes."""

    def setUp(self):
        super(TestSeries, self).setUp()

        self.d = {}

        s = tm.makeStringSeries()
        s.name = 'string'
        self.d['string'] = s

        s = tm.makeObjectSeries()
        s.name = 'object'
        self.d['object'] = s

        # an all-NaT datetime64[ns] series
        s = Series(tslib.iNaT, dtype='M8[ns]', index=range(5))
        self.d['date'] = s

        data = {
            'A': [0., 1., 2., 3., np.nan],
            'B': [0, 1, 0, 1, 0],
            'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
            'D': date_range('1/1/2009', periods=5),
            'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
            'F': [Timestamp('20130102', tz='US/Eastern')] * 2 +
            [Timestamp('20130603', tz='CET')] * 3,
            'G': [Timestamp('20130102', tz='US/Eastern')] * 5,
        }

        self.d['float'] = Series(data['A'])
        self.d['int'] = Series(data['B'])
        self.d['mixed'] = Series(data['E'])
        # 'F' mixes two different time zones in one series
        self.d['dt_tz_mixed'] = Series(data['F'])
        self.d['dt_tz'] = Series(data['G'])

    def test_basic(self):

        # run multiple times here
        for n in range(10):
            for s, i in self.d.items():
                i_rec = self.encode_decode(i)
                assert_series_equal(i, i_rec)
  295. class TestCategorical(TestPackers):
  296. def setUp(self):
  297. super(TestCategorical, self).setUp()
  298. self.d = {}
  299. self.d['plain_str'] = Categorical(['a', 'b', 'c', 'd', 'e'])
  300. self.d['plain_str_ordered'] = Categorical(['a', 'b', 'c', 'd', 'e'],
  301. ordered=True)
  302. self.d['plain_int'] = Categorical([5, 6, 7, 8])
  303. self.d['plain_int_ordered'] = Categorical([5, 6, 7, 8], ordered=True)
  304. def test_basic(self):
  305. # run multiple times here
  306. for n in range(10):
  307. for s, i in self.d.items():
  308. i_rec = self.encode_decode(i)
  309. assert_categorical_equal(i, i_rec)
class TestNDFrame(TestPackers):
    """Round-trip DataFrames and Panels, plus multi-object packing."""

    def setUp(self):
        super(TestNDFrame, self).setUp()

        data = {
            'A': [0., 1., 2., 3., np.nan],
            'B': [0, 1, 0, 1, 0],
            'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
            'D': date_range('1/1/2009', periods=5),
            'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
            'F': [Timestamp('20130102', tz='US/Eastern')] * 5,
            'G': [Timestamp('20130603', tz='CET')] * 5,
            'H': Categorical(['a', 'b', 'c', 'd', 'e']),
            'I': Categorical(['a', 'b', 'c', 'd', 'e'], ordered=True),
        }

        self.frame = {
            'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)),
            'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)),
            'mixed': DataFrame(data)}

        self.panel = {
            'float': Panel(dict(ItemA=self.frame['float'],
                                ItemB=self.frame['float'] + 1))}

    def test_basic_frame(self):

        for s, i in self.frame.items():
            i_rec = self.encode_decode(i)
            assert_frame_equal(i, i_rec)

    def test_basic_panel(self):

        for s, i in self.panel.items():
            i_rec = self.encode_decode(i)
            assert_panel_equal(i, i_rec)

    def test_multi(self):

        # a dict of frames round-trips key by key
        i_rec = self.encode_decode(self.frame)
        for k in self.frame.keys():
            assert_frame_equal(self.frame[k], i_rec[k])

        l = tuple([self.frame['float'], self.frame['float'].A,
                   self.frame['float'].B, None])
        l_rec = self.encode_decode(l)
        check_arbitrary(l, l_rec)

        # this is an oddity in that packed lists will be returned as tuples
        l = [self.frame['float'], self.frame['float']
             .A, self.frame['float'].B, None]
        l_rec = self.encode_decode(l)
        self.assertIsInstance(l_rec, tuple)
        check_arbitrary(l, l_rec)

    def test_iterator(self):

        l = [self.frame['float'], self.frame['float']
             .A, self.frame['float'].B, None]

        with ensure_clean(self.path) as path:
            to_msgpack(path, *l)
            for i, packed in enumerate(read_msgpack(path, iterator=True)):
                check_arbitrary(packed, l[i])

    def tests_datetimeindex_freq_issue(self):

        # GH 5947
        # inferring freq on the datetimeindex
        df = DataFrame([1, 2, 3], index=date_range('1/1/2013', '1/3/2013'))
        result = self.encode_decode(df)
        assert_frame_equal(result, df)

        df = DataFrame([1, 2], index=date_range('1/1/2013', '1/2/2013'))
        result = self.encode_decode(df)
        assert_frame_equal(result, df)

    def test_dataframe_duplicate_column_names(self):

        # GH 9618
        expected_1 = DataFrame(columns=['a', 'a'])
        expected_2 = DataFrame(columns=[1] * 100)
        expected_2.loc[0] = np.random.randn(100)
        expected_3 = DataFrame(columns=[1, 1])
        expected_3.loc[0] = ['abc', np.nan]

        result_1 = self.encode_decode(expected_1)
        result_2 = self.encode_decode(expected_2)
        result_3 = self.encode_decode(expected_3)

        assert_frame_equal(result_1, expected_1)
        assert_frame_equal(result_2, expected_2)
        assert_frame_equal(result_3, expected_3)
class TestSparse(TestPackers):
    """Sparse structures are not msgpack-serializable; expect errors."""

    def _check_roundtrip(self, obj, comparator, **kwargs):

        # currently these are not implemented
        # i_rec = self.encode_decode(obj)
        # comparator(obj, i_rec, **kwargs)
        self.assertRaises(NotImplementedError, self.encode_decode, obj)

    def test_sparse_series(self):

        s = tm.makeStringSeries()
        s[3:5] = np.nan
        ss = s.to_sparse()
        self._check_roundtrip(ss, tm.assert_series_equal,
                              check_series_type=True)

        ss2 = s.to_sparse(kind='integer')
        self._check_roundtrip(ss2, tm.assert_series_equal,
                              check_series_type=True)

        ss3 = s.to_sparse(fill_value=0)
        self._check_roundtrip(ss3, tm.assert_series_equal,
                              check_series_type=True)

    def test_sparse_frame(self):

        s = tm.makeDataFrame()
        s.ix[3:5, 1:3] = np.nan
        s.ix[8:10, -2] = np.nan
        ss = s.to_sparse()
        self._check_roundtrip(ss, tm.assert_frame_equal,
                              check_frame_type=True)

        ss2 = s.to_sparse(kind='integer')
        self._check_roundtrip(ss2, tm.assert_frame_equal,
                              check_frame_type=True)

        ss3 = s.to_sparse(fill_value=0)
        self._check_roundtrip(ss3, tm.assert_frame_equal,
                              check_frame_type=True)
class TestCompression(TestPackers):
    """See https://github.com/pydata/pandas/pull/9783
    """

    def setUp(self):
        # record sqlalchemy availability for the *_to_sql tests below
        try:
            from sqlalchemy import create_engine
            self._create_sql_engine = create_engine
        except ImportError:
            self._SQLALCHEMY_INSTALLED = False
        else:
            self._SQLALCHEMY_INSTALLED = True

        super(TestCompression, self).setUp()
        data = {
            'A': np.arange(1000, dtype=np.float64),
            'B': np.arange(1000, dtype=np.int32),
            'C': list(100 * 'abcdefghij'),
            'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
            'E': [datetime.timedelta(days=x) for x in range(1000)],
        }
        # NOTE(review): ['A', 'A'] / ['B', 'B'] dedupe to a single column
        # inside dict(); possibly ['A', 'B'] was intended -- confirm.
        self.frame = {
            'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
            'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
            'mixed': DataFrame(data),
        }

    def test_plain(self):
        # round-trip without compression as a baseline
        i_rec = self.encode_decode(self.frame)
        for k in self.frame.keys():
            assert_frame_equal(self.frame[k], i_rec[k])

    def _test_compression(self, compress):
        i_rec = self.encode_decode(self.frame, compress=compress)
        for k in self.frame.keys():
            value = i_rec[k]
            expected = self.frame[k]
            assert_frame_equal(value, expected)
            # make sure that we can write to the new frames
            for block in value._data.blocks:
                self.assertTrue(block.values.flags.writeable)

    def test_compression_zlib(self):
        if not _ZLIB_INSTALLED:
            raise nose.SkipTest('no zlib')
        self._test_compression('zlib')

    def test_compression_blosc(self):
        if not _BLOSC_INSTALLED:
            raise nose.SkipTest('no blosc')
        self._test_compression('blosc')

    def _test_compression_warns_when_decompress_caches(self, compress):
        # When a decompressor keeps a reference to its output, the unpacker
        # must copy the data (and warn) so mutations don't corrupt the cache.
        not_garbage = []
        control = []  # copied data

        compress_module = globals()[compress]
        real_decompress = compress_module.decompress

        def decompress(ob):
            """mock decompress function that delegates to the real
            decompress but caches the result and a copy of the result.
            """
            res = real_decompress(ob)
            not_garbage.append(res)  # hold a reference to this bytes object
            control.append(bytearray(res))  # copy the data here to check later
            return res

        # types mapped to values to add in place.
        rhs = {
            np.dtype('float64'): 1.0,
            np.dtype('int32'): 1,
            np.dtype('object'): 'a',
            np.dtype('datetime64[ns]'): np.timedelta64(1, 'ns'),
            np.dtype('timedelta64[ns]'): np.timedelta64(1, 'ns'),
        }

        with patch(compress_module, 'decompress', decompress), \
                tm.assert_produces_warning(PerformanceWarning) as ws:

            i_rec = self.encode_decode(self.frame, compress=compress)
            for k in self.frame.keys():

                value = i_rec[k]
                expected = self.frame[k]
                assert_frame_equal(value, expected)
                # make sure that we can write to the new frames even though
                # we needed to copy the data
                for block in value._data.blocks:
                    self.assertTrue(block.values.flags.writeable)
                    # mutate the data in some way
                    block.values[0] += rhs[block.dtype]

        for w in ws:
            # check the messages from our warnings
            self.assertEqual(
                str(w.message),
                'copying data after decompressing; this may mean that'
                ' decompress is caching its result',
            )

        for buf, control_buf in zip(not_garbage, control):
            # make sure none of our mutations above affected the
            # original buffers
            self.assertEqual(buf, control_buf)

    def test_compression_warns_when_decompress_caches_zlib(self):
        if not _ZLIB_INSTALLED:
            raise nose.SkipTest('no zlib')
        self._test_compression_warns_when_decompress_caches('zlib')

    def test_compression_warns_when_decompress_caches_blosc(self):
        if not _BLOSC_INSTALLED:
            raise nose.SkipTest('no blosc')
        self._test_compression_warns_when_decompress_caches('blosc')

    def _test_small_strings_no_warn(self, compress):
        # small payloads skip the decompress path and must not warn
        empty = np.array([], dtype='uint8')
        with tm.assert_produces_warning(None):
            empty_unpacked = self.encode_decode(empty, compress=compress)

        tm.assert_numpy_array_equal(empty_unpacked, empty)
        self.assertTrue(empty_unpacked.flags.writeable)

        char = np.array([ord(b'a')], dtype='uint8')
        with tm.assert_produces_warning(None):
            char_unpacked = self.encode_decode(char, compress=compress)

        tm.assert_numpy_array_equal(char_unpacked, char)
        self.assertTrue(char_unpacked.flags.writeable)
        # if this test fails I am sorry because the interpreter is now in a
        # bad state where b'a' points to 98 == ord(b'b').
        char_unpacked[0] = ord(b'b')
        # we compare the ord of bytes b'a' with unicode u'a' because the should
        # always be the same (unless we were able to mutate the shared
        # character singleton in which case ord(b'a') == ord(b'b').
        self.assertEqual(ord(b'a'), ord(u'a'))
        tm.assert_numpy_array_equal(
            char_unpacked,
            np.array([ord(b'b')], dtype='uint8'),
        )

    def test_small_strings_no_warn_zlib(self):
        if not _ZLIB_INSTALLED:
            raise nose.SkipTest('no zlib')
        self._test_small_strings_no_warn('zlib')

    def test_small_strings_no_warn_blosc(self):
        if not _BLOSC_INSTALLED:
            raise nose.SkipTest('no blosc')
        self._test_small_strings_no_warn('blosc')

    def test_readonly_axis_blosc(self):
        # GH11880
        if not _BLOSC_INSTALLED:
            raise nose.SkipTest('no blosc')
        df1 = DataFrame({'A': list('abcd')})
        df2 = DataFrame(df1, index=[1., 2., 3., 4.])
        self.assertTrue(1 in self.encode_decode(df1['A'], compress='blosc'))
        self.assertTrue(1. in self.encode_decode(df2['A'], compress='blosc'))

    def test_readonly_axis_zlib(self):
        # GH11880
        df1 = DataFrame({'A': list('abcd')})
        df2 = DataFrame(df1, index=[1., 2., 3., 4.])
        self.assertTrue(1 in self.encode_decode(df1['A'], compress='zlib'))
        self.assertTrue(1. in self.encode_decode(df2['A'], compress='zlib'))

    def test_readonly_axis_blosc_to_sql(self):
        # GH11880
        if not _BLOSC_INSTALLED:
            raise nose.SkipTest('no blosc')
        if not self._SQLALCHEMY_INSTALLED:
            raise nose.SkipTest('no sqlalchemy')
        expected = DataFrame({'A': list('abcd')})
        df = self.encode_decode(expected, compress='blosc')
        eng = self._create_sql_engine("sqlite:///:memory:")
        df.to_sql('test', eng, if_exists='append')
        result = pandas.read_sql_table('test', eng, index_col='index')
        result.index.names = [None]
        assert_frame_equal(expected, result)

    def test_readonly_axis_zlib_to_sql(self):
        # GH11880
        if not _ZLIB_INSTALLED:
            raise nose.SkipTest('no zlib')
        if not self._SQLALCHEMY_INSTALLED:
            raise nose.SkipTest('no sqlalchemy')
        expected = DataFrame({'A': list('abcd')})
        df = self.encode_decode(expected, compress='zlib')
        eng = self._create_sql_engine("sqlite:///:memory:")
        df.to_sql('test', eng, if_exists='append')
        result = pandas.read_sql_table('test', eng, index_col='index')
        result.index.names = [None]
        assert_frame_equal(expected, result)
class TestEncoding(TestPackers):
    """Round-trip frames under explicit and default text encodings (GH10581)."""

    def setUp(self):
        super(TestEncoding, self).setUp()
        data = {
            'A': [compat.u('\u2019')] * 1000,
            'B': np.arange(1000, dtype=np.int32),
            'C': list(100 * 'abcdefghij'),
            'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
            'E': [datetime.timedelta(days=x) for x in range(1000)],
            'G': [400] * 1000
        }
        # NOTE(review): ['A', 'A'] / ['B', 'B'] dedupe to a single column
        # inside dict(); possibly ['A', 'B'] was intended -- confirm.
        self.frame = {
            'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
            'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
            'mixed': DataFrame(data),
        }
        self.utf_encodings = ['utf8', 'utf16', 'utf32']

    def test_utf(self):
        # GH10581
        for encoding in self.utf_encodings:
            for frame in compat.itervalues(self.frame):
                result = self.encode_decode(frame, encoding=encoding)
                assert_frame_equal(result, frame)

    def test_default_encoding(self):
        for frame in compat.itervalues(self.frame):
            # packing with no encoding must equal packing with utf8
            result = frame.to_msgpack()
            expected = frame.to_msgpack(encoding='utf8')
            self.assertEqual(result, expected)
            result = self.encode_decode(frame)
            assert_frame_equal(result, frame)
  611. class TestMsgpack():
  612. """
  613. How to add msgpack tests:
  614. 1. Install pandas version intended to output the msgpack.
  615. TestPackers
  616. 2. Execute "generate_legacy_storage_files.py" to create the msgpack.
  617. $ python generate_legacy_storage_files.py <output_dir> msgpack
  618. 3. Move the created pickle to "data/legacy_msgpack/<version>" directory.
  619. NOTE: TestMsgpack can't be a subclass of tm.Testcase to use test generator.
  620. http://stackoverflow.com/questions/6689537/nose-test-generators-inside-class
  621. """
  622. def setUp(self):
  623. from pandas.io.tests.generate_legacy_storage_files import (
  624. create_msgpack_data, create_data)
  625. self.data = create_msgpack_data()
  626. self.all_data = create_data()
  627. self.path = u('__%s__.msgpack' % tm.rands(10))
  628. self.minimum_structure = {'series': ['float', 'int', 'mixed',
  629. 'ts', 'mi', 'dup'],
  630. 'frame': ['float', 'int', 'mixed', 'mi'],
  631. 'panel': ['float'],
  632. 'index': ['int', 'date', 'period'],
  633. 'mi': ['reg2']}
  634. def check_min_structure(self, data):
  635. for typ, v in self.minimum_structure.items():
  636. assert typ in data, '"{0}" not found in unpacked data'.format(typ)
  637. for kind in v:
  638. msg = '"{0}" not found in data["{1}"]'.format(kind, typ)
  639. assert kind in data[typ], msg
  640. def compare(self, vf, version):
  641. # GH12277 encoding default used to be latin-1, now utf-8
  642. if LooseVersion(version) < '0.18.0':
  643. data = read_msgpack(vf, encoding='latin-1')
  644. else:
  645. data = read_msgpack(vf)
  646. self.check_min_structure(data)
  647. for typ, dv in data.items():
  648. assert typ in self.all_data, ('unpacked data contains '
  649. 'extra key "{0}"'
  650. .format(typ))
  651. for dt, result in dv.items():
  652. assert dt in self.all_data[typ], ('data["{0}"] contains extra '
  653. 'key "{1}"'.format(typ, dt))
  654. try:
  655. expected = self.data[typ][dt]
  656. except KeyError:
  657. continue
  658. # use a specific comparator
  659. # if available
  660. comp_method = "compare_{typ}_{dt}".format(typ=typ, dt=dt)
  661. comparator = getattr(self, comp_method, None)
  662. if comparator is not None:
  663. comparator(result, expected, typ, version)
  664. else:
  665. check_arbitrary(result, expected)
  666. return data
  667. def compare_series_dt_tz(self, result, expected, typ, version):
  668. # 8260
  669. # dtype is object < 0.17.0
  670. if LooseVersion(version) < '0.17.0':
  671. expected = expected.astype(object)
  672. tm.assert_series_equal(result, expected)
  673. else:
  674. tm.assert_series_equal(result, expected)
  675. def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
  676. # 8260
  677. # dtype is object < 0.17.0
  678. if LooseVersion(version) < '0.17.0':
  679. expected = expected.astype(object)
  680. tm.assert_frame_equal(result, expected)
  681. else:
  682. tm.assert_frame_equal(result, expected)
  683. def read_msgpacks(self, version):
  684. pth = tm.get_data_path('legacy_msgpack/{0}'.format(str(version)))
  685. n = 0
  686. for f in os.listdir(pth):
  687. # GH12142 0.17 files packed in P2 can't be read in P3
  688. if (compat.PY3 and version.startswith('0.17.') and
  689. f.split('.')[-4][-1] == '2'):
  690. continue
  691. vf = os.path.join(pth, f)
  692. try:
  693. self.compare(vf, version)
  694. except ImportError:
  695. # blosc not installed
  696. continue
  697. n += 1
  698. assert n > 0, 'Msgpack files are not tested'
  699. def test_msgpack(self):
  700. msgpack_path = tm.get_data_path('legacy_msgpack')
  701. n = 0
  702. for v in os.listdir(msgpack_path):
  703. pth = os.path.join(msgpack_path, v)
  704. if os.path.isdir(pth):
  705. yield self.read_msgpacks, v
  706. n += 1
  707. assert n > 0, 'Msgpack files are not tested'