
/pandas/tools/merge.py

http://github.com/wesm/pandas

  1. """
  2. SQL-style merge routines
  3. """
  4. import copy
  5. import warnings
  6. import numpy as np
  7. from pandas.compat import range, lrange, lzip, zip, map, filter
  8. import pandas.compat as compat
  9. from pandas import (Categorical, DataFrame, Series,
  10. Index, MultiIndex, Timedelta)
  11. from pandas.core.frame import _merge_doc
  12. from pandas.types.generic import ABCSeries
  13. from pandas.types.common import (is_datetime64tz_dtype,
  14. is_datetime64_dtype,
  15. needs_i8_conversion,
  16. is_int64_dtype,
  17. is_integer_dtype,
  18. is_float_dtype,
  19. is_integer,
  20. is_int_or_datetime_dtype,
  21. is_dtype_equal,
  22. is_bool,
  23. is_list_like,
  24. _ensure_int64,
  25. _ensure_float64,
  26. _ensure_object)
  27. from pandas.types.missing import na_value_for_dtype
  28. from pandas.core.generic import NDFrame
  29. from pandas.core.index import (_get_combined_index,
  30. _ensure_index, _get_consensus_names,
  31. _all_indexes_same)
  32. from pandas.core.internals import (items_overlap_with_suffix,
  33. concatenate_block_managers)
  34. from pandas.util.decorators import Appender, Substitution
  35. import pandas.core.algorithms as algos
  36. import pandas.core.common as com
  37. import pandas.types.concat as _concat
  38. import pandas._join as _join
  39. import pandas.hashtable as _hash
  40. @Substitution('\nleft : DataFrame')
  41. @Appender(_merge_doc, indents=0)
  42. def merge(left, right, how='inner', on=None, left_on=None, right_on=None,
  43. left_index=False, right_index=False, sort=False,
  44. suffixes=('_x', '_y'), copy=True, indicator=False):
  45. op = _MergeOperation(left, right, how=how, on=on, left_on=left_on,
  46. right_on=right_on, left_index=left_index,
  47. right_index=right_index, sort=sort, suffixes=suffixes,
  48. copy=copy, indicator=indicator)
  49. return op.get_result()
  50. if __debug__:
  51. merge.__doc__ = _merge_doc % '\nleft : DataFrame'
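
# Editor's note: a minimal usage sketch for ``merge`` (added editorially; the
# frames below are illustrative only and not part of the original module).
#
# >>> left = DataFrame({'key': ['a', 'b', 'c'], 'lval': [1, 2, 3]})
# >>> right = DataFrame({'key': ['b', 'c', 'd'], 'rval': [4, 5, 6]})
# >>> merge(left, right, on='key', how='inner')
#   key  lval  rval
# 0   b     2     4
# 1   c     3     5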


class MergeError(ValueError):
    pass


def _groupby_and_merge(by, on, left, right, _merge_pieces,
                       check_duplicates=True):
    """
    groupby & merge; we are always performing a left-by type operation

    Parameters
    ----------
    by: field to group
    on: duplicates field
    left: left frame
    right: right frame
    _merge_pieces: function for merging
    check_duplicates: boolean, default True
        should we check & clean duplicates
    """
    pieces = []
    if not isinstance(by, (list, tuple)):
        by = [by]

    lby = left.groupby(by, sort=False)

    # if we can groupby the rhs
    # then we can get vastly better perf
    try:

        # we will check & remove duplicates if indicated
        if check_duplicates:
            if on is None:
                on = []
            elif not isinstance(on, (list, tuple)):
                on = [on]

            if right.duplicated(by + on).any():
                right = right.drop_duplicates(by + on, keep='last')
        rby = right.groupby(by, sort=False)
    except KeyError:
        rby = None

    for key, lhs in lby:

        if rby is None:
            rhs = right
        else:
            try:
                rhs = right.take(rby.indices[key])
            except KeyError:
                # key doesn't exist in right; emit the left rows with
                # the right-only columns added as all-NA
                lcols = lhs.columns.tolist()
                cols = lcols + [r for r in right.columns
                                if r not in set(lcols)]
                merged = lhs.reindex(columns=cols)
                merged.index = range(len(merged))
                pieces.append(merged)
                continue

        merged = _merge_pieces(lhs, rhs)

        # make sure join keys are in the merged
        # TODO, should _merge_pieces do this?
        for k in by:
            try:
                if k in merged:
                    merged[k] = key
            except:
                pass

        pieces.append(merged)

    # preserve the original order
    # if we have a missing piece this can be reset
    result = concat(pieces, ignore_index=True)
    result = result.reindex(columns=pieces[0].columns, copy=False)
    return result, lby


def ordered_merge(left, right, on=None,
                  left_on=None, right_on=None,
                  left_by=None, right_by=None,
                  fill_method=None, suffixes=('_x', '_y')):

    warnings.warn("ordered_merge is deprecated and replaced by merge_ordered",
                  FutureWarning, stacklevel=2)
    return merge_ordered(left, right, on=on,
                         left_on=left_on, right_on=right_on,
                         left_by=left_by, right_by=right_by,
                         fill_method=fill_method, suffixes=suffixes)


def merge_ordered(left, right, on=None,
                  left_on=None, right_on=None,
                  left_by=None, right_by=None,
                  fill_method=None, suffixes=('_x', '_y'),
                  how='outer'):
    """Perform merge with optional filling/interpolation designed for ordered
    data like time series data. Optionally perform group-wise merge (see
    examples)

    Parameters
    ----------
    left : DataFrame
    right : DataFrame
    on : label or list
        Field names to join on. Must be found in both DataFrames.
    left_on : label or list, or array-like
        Field names to join on in left DataFrame. Can be a vector or list of
        vectors of the length of the DataFrame to use a particular vector as
        the join key instead of columns
    right_on : label or list, or array-like
        Field names to join on in right DataFrame or vector/list of vectors
        per left_on docs
    left_by : column name or list of column names
        Group left DataFrame by group columns and merge piece by piece with
        right DataFrame
    right_by : column name or list of column names
        Group right DataFrame by group columns and merge piece by piece with
        left DataFrame
    fill_method : {'ffill', None}, default None
        Interpolation method for data
    suffixes : 2-length sequence (tuple, list, ...)
        Suffix to apply to overlapping column names in the left and right
        side, respectively
    how : {'left', 'right', 'outer', 'inner'}, default 'outer'
        * left: use only keys from left frame (SQL: left outer join)
        * right: use only keys from right frame (SQL: right outer join)
        * outer: use union of keys from both frames (SQL: full outer join)
        * inner: use intersection of keys from both frames (SQL: inner join)

        .. versionadded:: 0.19.0

    Examples
    --------
    >>> A                      >>> B
          key  lvalue group        key  rvalue
    0   a       1     a        0     b       1
    1   c       2     a        1     c       2
    2   e       3     a        2     d       3
    3   a       1     b
    4   c       2     b
    5   e       3     b

    >>> merge_ordered(A, B, fill_method='ffill', left_by='group')
       key  lvalue group  rvalue
    0    a       1     a     NaN
    1    b       1     a       1
    2    c       2     a       2
    3    d       2     a       3
    4    e       3     a       3
    5    f       3     a       4
    6    a       1     b     NaN
    7    b       1     b       1
    8    c       2     b       2
    9    d       2     b       3
    10   e       3     b       3
    11   f       3     b       4

    Returns
    -------
    merged : DataFrame
        The output type will be the same as 'left', if it is a subclass
        of DataFrame.

    See also
    --------
    merge
    merge_asof
    """
    def _merger(x, y):
        # perform the ordered merge operation
        op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on,
                           suffixes=suffixes, fill_method=fill_method,
                           how=how)
        return op.get_result()

    if left_by is not None and right_by is not None:
        raise ValueError('Can only group either left or right frames')
    elif left_by is not None:
        result, _ = _groupby_and_merge(left_by, on, left, right,
                                       lambda x, y: _merger(x, y),
                                       check_duplicates=False)
    elif right_by is not None:
        result, _ = _groupby_and_merge(right_by, on, right, left,
                                       lambda x, y: _merger(y, x),
                                       check_duplicates=False)
    else:
        result = _merger(left, right)
    return result

ordered_merge.__doc__ = merge_ordered.__doc__
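
# Editor's note: a short group-wise sketch (added editorially; the frames and
# the expected output below are illustrative and approximate).
#
# >>> A = DataFrame({'key': ['a', 'c', 'e'], 'lvalue': [1, 2, 3],
# ...                'group': ['x', 'x', 'x']})
# >>> B = DataFrame({'key': ['b', 'c', 'd'], 'rvalue': [1, 2, 3]})
# >>> merge_ordered(A, B, on='key', left_by='group', fill_method='ffill')
#   key  lvalue group  rvalue
# 0   a       1     x     NaN
# 1   b       1     x     1.0
# 2   c       2     x     2.0
# 3   d       2     x     3.0
# 4   e       3     x     3.0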


def merge_asof(left, right, on=None,
               left_on=None, right_on=None,
               by=None,
               suffixes=('_x', '_y'),
               tolerance=None,
               allow_exact_matches=True):
    """Perform an asof merge. This is similar to a left-join except that we
    match on nearest key rather than equal keys.

    For each row in the left DataFrame, we select the last row in the right
    DataFrame whose 'on' key is less than or equal to the left's key. Both
    DataFrames must be sorted by the key.

    Optionally perform group-wise merge. This searches for the nearest match
    on the 'on' key within the same group according to 'by'.

    .. versionadded:: 0.19.0

    Parameters
    ----------
    left : DataFrame
    right : DataFrame
    on : label
        Field name to join on. Must be found in both DataFrames.
        The data MUST be ordered. Furthermore this must be a numeric column,
        such as datetimelike, integer, or float. On or left_on/right_on
        must be given.
    left_on : label
        Field name to join on in left DataFrame.
    right_on : label
        Field name to join on in right DataFrame.
    by : column name
        Group both the left and right DataFrames by the group column; perform
        the merge operation on these pieces and recombine.
    suffixes : 2-length sequence (tuple, list, ...)
        Suffix to apply to overlapping column names in the left and right
        side, respectively
    tolerance : integer or Timedelta, optional, default None
        select asof tolerance within this range; must be compatible
        with the merge index.
    allow_exact_matches : boolean, default True

        - If True, allow matching the same 'on' value
          (i.e. less-than-or-equal-to)
        - If False, don't match the same 'on' value
          (i.e., strictly less-than)

    Returns
    -------
    merged : DataFrame

    Examples
    --------
    >>> left
        a left_val
    0   1        a
    1   5        b
    2  10        c

    >>> right
       a  right_val
    0  1          1
    1  2          2
    2  3          3
    3  6          6
    4  7          7

    >>> pd.merge_asof(left, right, on='a')
        a left_val  right_val
    0   1        a          1
    1   5        b          3
    2  10        c          7

    >>> pd.merge_asof(left, right, on='a', allow_exact_matches=False)
        a left_val  right_val
    0   1        a        NaN
    1   5        b        3.0
    2  10        c        7.0

    For this example, we can achieve a similar result through
    ``pd.merge_ordered()``, though it's not nearly as performant.

    >>> (pd.merge_ordered(left, right, on='a')
    ...    .ffill()
    ...    .drop_duplicates(['left_val'])
    ... )
        a left_val  right_val
    0   1        a        1.0
    3   5        b        3.0
    6  10        c        7.0

    Here is a real-world time-series example

    >>> quotes
                         time ticker     bid     ask
    0 2016-05-25 13:30:00.023   GOOG  720.50  720.93
    1 2016-05-25 13:30:00.023   MSFT   51.95   51.96
    2 2016-05-25 13:30:00.030   MSFT   51.97   51.98
    3 2016-05-25 13:30:00.041   MSFT   51.99   52.00
    4 2016-05-25 13:30:00.048   GOOG  720.50  720.93
    5 2016-05-25 13:30:00.049   AAPL   97.99   98.01
    6 2016-05-25 13:30:00.072   GOOG  720.50  720.88
    7 2016-05-25 13:30:00.075   MSFT   52.01   52.03

    >>> trades
                         time ticker   price  quantity
    0 2016-05-25 13:30:00.023   MSFT   51.95        75
    1 2016-05-25 13:30:00.038   MSFT   51.95       155
    2 2016-05-25 13:30:00.048   GOOG  720.77       100
    3 2016-05-25 13:30:00.048   GOOG  720.92       100
    4 2016-05-25 13:30:00.048   AAPL   98.00       100

    By default we are taking the asof of the quotes

    >>> pd.merge_asof(trades, quotes,
    ...               on='time',
    ...               by='ticker')
                         time ticker   price  quantity     bid     ask
    0 2016-05-25 13:30:00.023   MSFT   51.95        75   51.95   51.96
    1 2016-05-25 13:30:00.038   MSFT   51.95       155   51.97   51.98
    2 2016-05-25 13:30:00.048   GOOG  720.77       100  720.50  720.93
    3 2016-05-25 13:30:00.048   GOOG  720.92       100  720.50  720.93
    4 2016-05-25 13:30:00.048   AAPL   98.00       100     NaN     NaN

    We only asof within 2ms between the quote time and the trade time

    >>> pd.merge_asof(trades, quotes,
    ...               on='time',
    ...               by='ticker',
    ...               tolerance=pd.Timedelta('2ms'))
                         time ticker   price  quantity     bid     ask
    0 2016-05-25 13:30:00.023   MSFT   51.95        75   51.95   51.96
    1 2016-05-25 13:30:00.038   MSFT   51.95       155     NaN     NaN
    2 2016-05-25 13:30:00.048   GOOG  720.77       100  720.50  720.93
    3 2016-05-25 13:30:00.048   GOOG  720.92       100  720.50  720.93
    4 2016-05-25 13:30:00.048   AAPL   98.00       100     NaN     NaN

    We only asof within 10ms between the quote time and the trade time
    and we exclude exact matches on time. However *prior* data will
    propagate forward

    >>> pd.merge_asof(trades, quotes,
    ...               on='time',
    ...               by='ticker',
    ...               tolerance=pd.Timedelta('10ms'),
    ...               allow_exact_matches=False)
                         time ticker   price  quantity     bid     ask
    0 2016-05-25 13:30:00.023   MSFT   51.95        75     NaN     NaN
    1 2016-05-25 13:30:00.038   MSFT   51.95       155   51.97   51.98
    2 2016-05-25 13:30:00.048   GOOG  720.77       100  720.50  720.93
    3 2016-05-25 13:30:00.048   GOOG  720.92       100  720.50  720.93
    4 2016-05-25 13:30:00.048   AAPL   98.00       100     NaN     NaN

    See also
    --------
    merge
    merge_ordered
    """
    op = _AsOfMerge(left, right,
                    on=on, left_on=left_on, right_on=right_on,
                    by=by, suffixes=suffixes,
                    how='asof', tolerance=tolerance,
                    allow_exact_matches=allow_exact_matches)
    return op.get_result()
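
# Editor's note: a compact sketch of an asof merge with an integer tolerance
# (added editorially; frames and the expected output are illustrative).
#
# >>> left = DataFrame({'a': [1, 5, 10], 'left_val': ['a', 'b', 'c']})
# >>> right = DataFrame({'a': [1, 2, 3, 6, 7], 'right_val': [1, 2, 3, 6, 7]})
# >>> merge_asof(left, right, on='a', tolerance=2)
#     a left_val  right_val
# 0   1        a        1.0
# 1   5        b        3.0
# 2  10        c        NaN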


# TODO: transformations??
# TODO: only copy DataFrames when modification necessary
class _MergeOperation(object):
    """
    Perform a database (SQL) merge operation between two DataFrame objects
    using either columns as keys or their row indexes
    """
    _merge_type = 'merge'

    def __init__(self, left, right, how='inner', on=None,
                 left_on=None, right_on=None, axis=1,
                 left_index=False, right_index=False, sort=True,
                 suffixes=('_x', '_y'), copy=True, indicator=False):
        self.left = self.orig_left = left
        self.right = self.orig_right = right
        self.how = how
        self.axis = axis

        self.on = com._maybe_make_list(on)
        self.left_on = com._maybe_make_list(left_on)
        self.right_on = com._maybe_make_list(right_on)

        self.copy = copy
        self.suffixes = suffixes
        self.sort = sort

        self.left_index = left_index
        self.right_index = right_index

        self.indicator = indicator

        if isinstance(self.indicator, compat.string_types):
            self.indicator_name = self.indicator
        elif isinstance(self.indicator, bool):
            self.indicator_name = '_merge' if self.indicator else None
        else:
            raise ValueError(
                'indicator option can only accept boolean or string arguments')

        if not isinstance(left, DataFrame):
            raise ValueError(
                'can not merge DataFrame with instance of '
                'type {0}'.format(type(left)))
        if not isinstance(right, DataFrame):
            raise ValueError(
                'can not merge DataFrame with instance of '
                'type {0}'.format(type(right)))

        # warn user when merging between different levels
        if left.columns.nlevels != right.columns.nlevels:
            msg = ('merging between different levels can give an unintended '
                   'result ({0} levels on the left, {1} on the right)')
            msg = msg.format(left.columns.nlevels, right.columns.nlevels)
            warnings.warn(msg, UserWarning)

        self._validate_specification()

        # note this function has side effects
        (self.left_join_keys,
         self.right_join_keys,
         self.join_names) = self._get_merge_keys()

    def get_result(self):
        if self.indicator:
            self.left, self.right = self._indicator_pre_merge(
                self.left, self.right)

        join_index, left_indexer, right_indexer = self._get_join_info()

        ldata, rdata = self.left._data, self.right._data
        lsuf, rsuf = self.suffixes

        llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
                                                     rdata.items, rsuf)

        lindexers = {1: left_indexer} if left_indexer is not None else {}
        rindexers = {1: right_indexer} if right_indexer is not None else {}

        result_data = concatenate_block_managers(
            [(ldata, lindexers), (rdata, rindexers)],
            axes=[llabels.append(rlabels), join_index],
            concat_axis=0, copy=self.copy)

        typ = self.left._constructor
        result = typ(result_data).__finalize__(self, method=self._merge_type)

        if self.indicator:
            result = self._indicator_post_merge(result)

        self._maybe_add_join_keys(result, left_indexer, right_indexer)

        return result

    def _indicator_pre_merge(self, left, right):

        columns = left.columns.union(right.columns)

        for i in ['_left_indicator', '_right_indicator']:
            if i in columns:
                raise ValueError("Cannot use `indicator=True` option when "
                                 "data contains a column named {}".format(i))
        if self.indicator_name in columns:
            raise ValueError(
                "Cannot use name of an existing column for indicator column")

        left = left.copy()
        right = right.copy()

        left['_left_indicator'] = 1
        left['_left_indicator'] = left['_left_indicator'].astype('int8')

        right['_right_indicator'] = 2
        right['_right_indicator'] = right['_right_indicator'].astype('int8')

        return left, right

    def _indicator_post_merge(self, result):

        result['_left_indicator'] = result['_left_indicator'].fillna(0)
        result['_right_indicator'] = result['_right_indicator'].fillna(0)

        result[self.indicator_name] = Categorical((result['_left_indicator'] +
                                                   result['_right_indicator']),
                                                  categories=[1, 2, 3])
        result[self.indicator_name] = (
            result[self.indicator_name]
            .cat.rename_categories(['left_only', 'right_only', 'both']))

        result = result.drop(labels=['_left_indicator', '_right_indicator'],
                             axis=1)
        return result

    def _maybe_add_join_keys(self, result, left_indexer, right_indexer):

        left_has_missing = None
        right_has_missing = None

        keys = zip(self.join_names, self.left_on, self.right_on)
        for i, (name, lname, rname) in enumerate(keys):
            if not _should_fill(lname, rname):
                continue

            take_left, take_right = None, None

            if name in result:

                if left_indexer is not None and right_indexer is not None:
                    if name in self.left:

                        if left_has_missing is None:
                            left_has_missing = any(left_indexer == -1)

                        if left_has_missing:
                            take_right = self.right_join_keys[i]

                            if not is_dtype_equal(result[name].dtype,
                                                  self.left[name].dtype):
                                take_left = self.left[name]._values

                    elif name in self.right:

                        if right_has_missing is None:
                            right_has_missing = any(right_indexer == -1)

                        if right_has_missing:
                            take_left = self.left_join_keys[i]

                            if not is_dtype_equal(result[name].dtype,
                                                  self.right[name].dtype):
                                take_right = self.right[name]._values

            elif left_indexer is not None \
                    and isinstance(self.left_join_keys[i], np.ndarray):

                take_left = self.left_join_keys[i]
                take_right = self.right_join_keys[i]

            if take_left is not None or take_right is not None:

                if take_left is None:
                    lvals = result[name]._values
                else:
                    lfill = na_value_for_dtype(take_left.dtype)
                    lvals = algos.take_1d(take_left, left_indexer,
                                          fill_value=lfill)

                if take_right is None:
                    rvals = result[name]._values
                else:
                    rfill = na_value_for_dtype(take_right.dtype)
                    rvals = algos.take_1d(take_right, right_indexer,
                                          fill_value=rfill)

                # if we have an all missing left_indexer
                # make sure to just use the right values
                mask = left_indexer == -1
                if mask.all():
                    key_col = rvals
                else:
                    key_col = Index(lvals).where(~mask, rvals)

                if name in result:
                    result[name] = key_col
                else:
                    result.insert(i, name or 'key_%d' % i, key_col)

    def _get_join_indexers(self):
        """ return the join indexers """
        return _get_join_indexers(self.left_join_keys,
                                  self.right_join_keys,
                                  sort=self.sort,
                                  how=self.how)

    def _get_join_info(self):
        left_ax = self.left._data.axes[self.axis]
        right_ax = self.right._data.axes[self.axis]

        if self.left_index and self.right_index:
            join_index, left_indexer, right_indexer = \
                left_ax.join(right_ax, how=self.how, return_indexers=True)
        elif self.right_index and self.how == 'left':
            join_index, left_indexer, right_indexer = \
                _left_join_on_index(left_ax, right_ax, self.left_join_keys,
                                    sort=self.sort)

        elif self.left_index and self.how == 'right':
            join_index, right_indexer, left_indexer = \
                _left_join_on_index(right_ax, left_ax, self.right_join_keys,
                                    sort=self.sort)
        else:
            (left_indexer,
             right_indexer) = self._get_join_indexers()

            if self.right_index:
                if len(self.left) > 0:
                    join_index = self.left.index.take(left_indexer)
                else:
                    join_index = self.right.index.take(right_indexer)
                    left_indexer = np.array([-1] * len(join_index))
            elif self.left_index:
                if len(self.right) > 0:
                    join_index = self.right.index.take(right_indexer)
                else:
                    join_index = self.left.index.take(left_indexer)
                    right_indexer = np.array([-1] * len(join_index))
            else:
                join_index = Index(np.arange(len(left_indexer)))

        if len(join_index) == 0:
            join_index = join_index.astype(object)
        return join_index, left_indexer, right_indexer

    def _get_merge_data(self):
        """
        Handles overlapping column names etc.
        """
        ldata, rdata = self.left._data, self.right._data
        lsuf, rsuf = self.suffixes

        llabels, rlabels = items_overlap_with_suffix(
            ldata.items, lsuf, rdata.items, rsuf)

        if not llabels.equals(ldata.items):
            ldata = ldata.copy(deep=False)
            ldata.set_axis(0, llabels)

        if not rlabels.equals(rdata.items):
            rdata = rdata.copy(deep=False)
            rdata.set_axis(0, rlabels)

        return ldata, rdata

    def _get_merge_keys(self):
        """
        Note: has side effects (copy/delete key columns)

        Parameters
        ----------
        left
        right
        on

        Returns
        -------
        left_keys, right_keys
        """
        left_keys = []
        right_keys = []
        join_names = []
        right_drop = []
        left_drop = []
        left, right = self.left, self.right

        is_lkey = lambda x: isinstance(
            x, (np.ndarray, ABCSeries)) and len(x) == len(left)
        is_rkey = lambda x: isinstance(
            x, (np.ndarray, ABCSeries)) and len(x) == len(right)

        # ugh, spaghetti re #733
        if _any(self.left_on) and _any(self.right_on):
            for lk, rk in zip(self.left_on, self.right_on):
                if is_lkey(lk):
                    left_keys.append(lk)
                    if is_rkey(rk):
                        right_keys.append(rk)
                        join_names.append(None)  # what to do?
                    else:
                        right_keys.append(right[rk]._values)
                        join_names.append(rk)
                else:
                    if not is_rkey(rk):
                        right_keys.append(right[rk]._values)
                        if lk == rk:
                            # avoid key upcast in corner case (length-0)
                            if len(left) > 0:
                                right_drop.append(rk)
                            else:
                                left_drop.append(lk)
                    else:
                        right_keys.append(rk)
                    left_keys.append(left[lk]._values)
                    join_names.append(lk)
        elif _any(self.left_on):
            for k in self.left_on:
                if is_lkey(k):
                    left_keys.append(k)
                    join_names.append(None)
                else:
                    left_keys.append(left[k]._values)
                    join_names.append(k)
            if isinstance(self.right.index, MultiIndex):
                right_keys = [lev._values.take(lab)
                              for lev, lab in zip(self.right.index.levels,
                                                  self.right.index.labels)]
            else:
                right_keys = [self.right.index.values]
        elif _any(self.right_on):
            for k in self.right_on:
                if is_rkey(k):
                    right_keys.append(k)
                    join_names.append(None)
                else:
                    right_keys.append(right[k]._values)
                    join_names.append(k)
            if isinstance(self.left.index, MultiIndex):
                left_keys = [lev._values.take(lab)
                             for lev, lab in zip(self.left.index.levels,
                                                 self.left.index.labels)]
            else:
                left_keys = [self.left.index.values]

        if left_drop:
            self.left = self.left.drop(left_drop, axis=1)

        if right_drop:
            self.right = self.right.drop(right_drop, axis=1)

        return left_keys, right_keys, join_names

    def _validate_specification(self):
        # Hm, any way to make this logic less complicated??
        if self.on is None and self.left_on is None and self.right_on is None:

            if self.left_index and self.right_index:
                self.left_on, self.right_on = (), ()
            elif self.left_index:
                if self.right_on is None:
                    raise MergeError('Must pass right_on or right_index=True')
            elif self.right_index:
                if self.left_on is None:
                    raise MergeError('Must pass left_on or left_index=True')
            else:
                # use the common columns
                common_cols = self.left.columns.intersection(
                    self.right.columns)
                if len(common_cols) == 0:
                    raise MergeError('No common columns to perform merge on')
                if not common_cols.is_unique:
                    raise MergeError("Data columns not unique: %s"
                                     % repr(common_cols))
                self.left_on = self.right_on = common_cols
        elif self.on is not None:
            if self.left_on is not None or self.right_on is not None:
                raise MergeError('Can only pass on OR left_on and '
                                 'right_on')
            self.left_on = self.right_on = self.on
        elif self.left_on is not None:
            n = len(self.left_on)
            if self.right_index:
                if len(self.left_on) != self.right.index.nlevels:
                    raise ValueError('len(left_on) must equal the number '
                                     'of levels in the index of "right"')
                self.right_on = [None] * n
        elif self.right_on is not None:
            n = len(self.right_on)
            if self.left_index:
                if len(self.right_on) != self.left.index.nlevels:
                    raise ValueError('len(right_on) must equal the number '
                                     'of levels in the index of "left"')
                self.left_on = [None] * n
        if len(self.right_on) != len(self.left_on):
            raise ValueError("len(right_on) must equal len(left_on)")


def _get_join_indexers(left_keys, right_keys, sort=False, how='inner',
                       **kwargs):
    """
    Parameters
    ----------
    left_keys : list of ndarrays
    right_keys : list of ndarrays
    sort : boolean, default False
    how : {'inner', 'left', 'right', 'outer'}, default 'inner'

    Returns
    -------
    tuple of (left_indexer, right_indexer)
    """
    from functools import partial

    assert len(left_keys) == len(right_keys), \
        'left_keys and right_keys must be the same length'

    # bind `sort` arg. of _factorize_keys
    fkeys = partial(_factorize_keys, sort=sort)

    # get left & right join labels and num. of levels at each location
    llab, rlab, shape = map(list, zip(* map(fkeys, left_keys, right_keys)))

    # get flat i8 keys from label lists
    lkey, rkey = _get_join_keys(llab, rlab, shape, sort)

    # factorize keys to a dense i8 space
    # `count` is the num. of unique keys
    # set(lkey) | set(rkey) == range(count)
    lkey, rkey, count = fkeys(lkey, rkey)

    # preserve left frame order if how == 'left' and sort == False
    kwargs = copy.copy(kwargs)
    if how == 'left':
        kwargs['sort'] = sort
    join_func = _join_functions[how]
    return join_func(lkey, rkey, count, **kwargs)
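
# Editor's note: a sketch of how the helper above behaves (added editorially;
# arrays and the expected output are illustrative and approximate).
#
# >>> lkeys = [np.array(['a', 'b', 'b'], dtype=object)]
# >>> rkeys = [np.array(['b', 'c'], dtype=object)]
# >>> _get_join_indexers(lkeys, rkeys, how='inner')
# (array([1, 2]), array([0, 0]))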


class _OrderedMerge(_MergeOperation):
    _merge_type = 'ordered_merge'

    def __init__(self, left, right, on=None, left_on=None,
                 right_on=None, axis=1,
                 suffixes=('_x', '_y'), copy=True,
                 fill_method=None, how='outer'):

        self.fill_method = fill_method
        _MergeOperation.__init__(self, left, right, on=on, left_on=left_on,
                                 right_on=right_on, axis=axis,
                                 how=how, suffixes=suffixes,
                                 sort=True  # factorize sorts
                                 )

    def get_result(self):
        join_index, left_indexer, right_indexer = self._get_join_info()

        # this is a bit kludgy
        ldata, rdata = self.left._data, self.right._data
        lsuf, rsuf = self.suffixes

        llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
                                                     rdata.items, rsuf)

        if self.fill_method == 'ffill':
            left_join_indexer = _join.ffill_indexer(left_indexer)
            right_join_indexer = _join.ffill_indexer(right_indexer)
        else:
            left_join_indexer = left_indexer
            right_join_indexer = right_indexer

        lindexers = {
            1: left_join_indexer} if left_join_indexer is not None else {}
        rindexers = {
            1: right_join_indexer} if right_join_indexer is not None else {}

        result_data = concatenate_block_managers(
            [(ldata, lindexers), (rdata, rindexers)],
            axes=[llabels.append(rlabels), join_index],
            concat_axis=0, copy=self.copy)

        typ = self.left._constructor
        result = typ(result_data).__finalize__(self, method=self._merge_type)

        self._maybe_add_join_keys(result, left_indexer, right_indexer)

        return result


_asof_functions = {
    'int64_t': _join.asof_join_int64_t,
    'double': _join.asof_join_double,
}

_asof_by_functions = {
    ('int64_t', 'int64_t'): _join.asof_join_int64_t_by_int64_t,
    ('double', 'int64_t'): _join.asof_join_double_by_int64_t,
    ('int64_t', 'object'): _join.asof_join_int64_t_by_object,
    ('double', 'object'): _join.asof_join_double_by_object,
}

_type_casters = {
    'int64_t': _ensure_int64,
    'double': _ensure_float64,
    'object': _ensure_object,
}


def _get_cython_type(dtype):
    """ Given a dtype, return 'int64_t', 'double', or 'object' """
    if is_integer_dtype(dtype):
        return 'int64_t'
    elif is_float_dtype(dtype):
        return 'double'
    else:
        return 'object'
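
# Editor's note: a quick illustration of the dtype dispatch above (added
# editorially).
#
# >>> _get_cython_type(np.dtype('int32'))
# 'int64_t'
# >>> _get_cython_type(np.dtype('float64'))
# 'double'
# >>> _get_cython_type(np.dtype('object'))
# 'object'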


class _AsOfMerge(_OrderedMerge):
    _merge_type = 'asof_merge'

    def __init__(self, left, right, on=None, by=None, left_on=None,
                 right_on=None, axis=1,
                 suffixes=('_x', '_y'), copy=True,
                 fill_method=None,
                 how='asof', tolerance=None,
                 allow_exact_matches=True):

        self.by = by
        self.tolerance = tolerance
        self.allow_exact_matches = allow_exact_matches

        _OrderedMerge.__init__(self, left, right, on=on, left_on=left_on,
                               right_on=right_on, axis=axis,
                               how=how, suffixes=suffixes,
                               fill_method=fill_method)

    def _validate_specification(self):
        super(_AsOfMerge, self)._validate_specification()

        # we only allow a single merge key for 'on'
        if len(self.left_on) != 1:
            raise MergeError("can only asof on a key for left")
        if len(self.right_on) != 1:
            raise MergeError("can only asof on a key for right")

        # add 'by' to our key-list so we can have it in the
        # output as a key
        if self.by is not None:
            if not is_list_like(self.by):
                self.by = [self.by]
            if len(self.by) != 1:
                raise MergeError("can only asof by a single key")

            self.left_on = self.by + list(self.left_on)
            self.right_on = self.by + list(self.right_on)

    @property
    def _asof_key(self):
        """ This is our asof key, the 'on' """
        return self.left_on[-1]

    def _get_merge_keys(self):

        # note this function has side effects
        (left_join_keys,
         right_join_keys,
         join_names) = super(_AsOfMerge, self)._get_merge_keys()

        # validate index types are the same
        for lk, rk in zip(left_join_keys, right_join_keys):
            if not is_dtype_equal(lk.dtype, rk.dtype):
                raise MergeError("incompatible merge keys, "
                                 "must be the same type")

        # validate tolerance; must be a Timedelta if we have a DTI
        if self.tolerance is not None:

            lt = left_join_keys[self.left_on.index(self._asof_key)]
            msg = "incompatible tolerance, must be compat " \
                  "with type {0}".format(type(lt))

            if is_datetime64_dtype(lt):
                if not isinstance(self.tolerance, Timedelta):
                    raise MergeError(msg)
                if self.tolerance < Timedelta(0):
                    raise MergeError("tolerance must be positive")

            elif is_int64_dtype(lt):
                if not is_integer(self.tolerance):
                    raise MergeError(msg)
                if self.tolerance < 0:
                    raise MergeError("tolerance must be positive")

            else:
                raise MergeError(msg)

        # validate allow_exact_matches
        if not is_bool(self.allow_exact_matches):
            raise MergeError("allow_exact_matches must be boolean, "
                             "passed {0}".format(self.allow_exact_matches))

        return left_join_keys, right_join_keys, join_names

    def _get_join_indexers(self):
        """ return the join indexers """

        # values to compare
        left_values = self.left_join_keys[-1]
        right_values = self.right_join_keys[-1]
        tolerance = self.tolerance

        # we require sortedness in the join keys
        msg = " keys must be sorted"
        if not Index(left_values).is_monotonic:
            raise ValueError('left' + msg)
        if not Index(right_values).is_monotonic:
            raise ValueError('right' + msg)

        # initial type conversion as needed
        if needs_i8_conversion(left_values):
            left_values = left_values.view('i8')
            right_values = right_values.view('i8')
            if tolerance is not None:
                tolerance = tolerance.value

        # a "by" parameter requires special handling
        if self.by is not None:
            left_by_values = self.left_join_keys[0]
            right_by_values = self.right_join_keys[0]

            # choose appropriate function by type
            on_type = _get_cython_type(left_values.dtype)
            by_type = _get_cython_type(left_by_values.dtype)

            on_type_caster = _type_casters[on_type]
            by_type_caster = _type_casters[by_type]
            func = _asof_by_functions[(on_type, by_type)]

            left_values = on_type_caster(left_values)
            right_values = on_type_caster(right_values)
            left_by_values = by_type_caster(left_by_values)
            right_by_values = by_type_caster(right_by_values)

            return func(left_values,
                        right_values,
                        left_by_values,
                        right_by_values,
                        self.allow_exact_matches,
                        tolerance)
        else:
            # choose appropriate function by type
            on_type = _get_cython_type(left_values.dtype)
            type_caster = _type_casters[on_type]
            func = _asof_functions[on_type]

            left_values = type_caster(left_values)
            right_values = type_caster(right_values)

            return func(left_values,
                        right_values,
                        self.allow_exact_matches,
                        tolerance)


def _get_multiindex_indexer(join_keys, index, sort):
    from functools import partial

    # bind `sort` argument
    fkeys = partial(_factorize_keys, sort=sort)

    # left & right join labels and num. of levels at each location
    rlab, llab, shape = map(list, zip(* map(fkeys, index.levels, join_keys)))
    if sort:
        rlab = list(map(np.take, rlab, index.labels))
    else:
        i8copy = lambda a: a.astype('i8', subok=False, copy=True)
        rlab = list(map(i8copy, index.labels))

    # fix right labels if there were any nulls
    for i in range(len(join_keys)):
        mask = index.labels[i] == -1
        if mask.any():
            # check if there were already any nulls at this location
            # if there were, they are factorized to `shape[i] - 1`
            a = join_keys[i][llab[i] == shape[i] - 1]
            # `a[0] != a[0]` is a NaN check (NaN is not equal to itself)
            if a.size == 0 or not a[0] != a[0]:
                shape[i] += 1

            rlab[i][mask] = shape[i] - 1

    # get flat i8 join keys
    lkey, rkey = _get_join_keys(llab, rlab, shape, sort)

    # factorize keys to a dense i8 space
    lkey, rkey, count = fkeys(lkey, rkey)

    return _join.left_outer_join(lkey, rkey, count, sort=sort)


def _get_single_indexer(join_key, index, sort=False):
    left_key, right_key, count = _factorize_keys(join_key, index, sort=sort)

    left_indexer, right_indexer = _join.left_outer_join(
        _ensure_int64(left_key),
        _ensure_int64(right_key),
        count, sort=sort)

    return left_indexer, right_indexer


def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
    if len(join_keys) > 1:
        if not ((isinstance(right_ax, MultiIndex) and
                 len(join_keys) == right_ax.nlevels)):
            raise AssertionError("If more than one join key is given then "
                                 "'right_ax' must be a MultiIndex and the "
                                 "number of join keys must be the number of "
                                 "levels in right_ax")

        left_indexer, right_indexer = \
            _get_multiindex_indexer(join_keys, right_ax, sort=sort)
    else:
        jkey = join_keys[0]

        left_indexer, right_indexer = \
            _get_single_indexer(jkey, right_ax, sort=sort)

    if sort or len(left_ax) != len(left_indexer):
        # if asked to sort or there are 1-to-many matches
        join_index = left_ax.take(left_indexer)
        return join_index, left_indexer, right_indexer

    # left frame preserves order & length of its index
    return left_ax, None, right_indexer


def _right_outer_join(x, y, max_groups):
    right_indexer, left_indexer = _join.left_outer_join(y, x, max_groups)
    return left_indexer, right_indexer
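
# Editor's note (added comment): _right_outer_join implements the right join
# by swapping the inputs to the cython left join and then swapping the
# returned indexers back, so a single left-outer kernel serves both
# directions.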


_join_functions = {
    'inner': _join.inner_join,
    'left': _join.left_outer_join,
    'right': _right_outer_join,
    'outer': _join.full_outer_join,
}


def _factorize_keys(lk, rk, sort=True):
    if is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):
        lk = lk.values
        rk = rk.values
    if is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk):
        klass = _hash.Int64Factorizer
        lk = _ensure_int64(com._values_from_object(lk))
        rk = _ensure_int64(com._values_from_object(rk))
    else:
        klass = _hash.Factorizer
        lk = _ensure_object(lk)
        rk = _ensure_object(rk)

    rizer = klass(max(len(lk), len(rk)))

    llab = rizer.factorize(lk)
    rlab = rizer.factorize(rk)

    count = rizer.get_count()

    if sort:
        uniques = rizer.uniques.to_array()
        llab, rlab = _sort_labels(uniques, llab, rlab)

    # NA group
    lmask = llab == -1
    lany = lmask.any()
    rmask = rlab == -1
    rany = rmask.any()

    if lany or rany:
        if lany:
            np.putmask(llab, lmask, count)
        if rany:
            np.putmask(rlab, rmask, count)
        count += 1

    return llab, rlab, count
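
# Editor's note: a sketch of the factorization contract (added editorially;
# output is illustrative). Both sides are encoded against one hashtable, so
# equal values on either side share the same integer label.
#
# >>> llab, rlab, count = _factorize_keys(
# ...     np.array(['b', 'b', 'a'], dtype=object),
# ...     np.array(['a', 'c'], dtype=object), sort=False)
# >>> llab, rlab, count
# (array([0, 0, 1]), array([1, 2]), 3)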


def _sort_labels(uniques, left, right):
    if not isinstance(uniques, np.ndarray):
        # tuplesafe
        uniques = Index(uniques).values

    l = len(left)
    labels = np.concatenate([left, right])

    _, new_labels = algos.safe_sort(uniques, labels, na_sentinel=-1)
    new_labels = _ensure_int64(new_labels)
    new_left, new_right = new_labels[:l], new_labels[l:]

    return new_left, new_right


def _get_join_keys(llab, rlab, shape, sort):
    from pandas.core.groupby import _int64_overflow_possible

    # how many levels can be done without overflow
    pred = lambda i: not _int64_overflow_possible(shape[:i])
    nlev = next(filter(pred, range(len(shape), 0, -1)))

    # get keys for the first `nlev` levels
    stride = np.prod(shape[1:nlev], dtype='i8')
    lkey = stride * llab[0].astype('i8', subok=False, copy=False)
    rkey = stride * rlab[0].astype('i8', subok=False, copy=False)

    for i in range(1, nlev):
        stride //= shape[i]
        lkey += llab[i] * stride
        rkey += rlab[i] * stride

    if nlev == len(shape):  # all done!
        return lkey, rkey

    # densify current keys to avoid overflow
    lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)

    llab = [lkey] + llab[nlev:]
    rlab = [rkey] + rlab[nlev:]
    shape = [count] + shape[nlev:]

    return _get_join_keys(llab, rlab, shape, sort)
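
# Editor's note (added comment): the packing above is mixed-radix arithmetic.
# With level sizes `shape`, a label tuple (i0, i1, ...) maps to
# i0 * prod(shape[1:]) + i1 * prod(shape[2:]) + ..., which is unique as long
# as the product of the sizes fits in int64. For example, with
# shape = [3, 4] the tuple (2, 1) packs to 2 * 4 + 1 = 9. When overflow would
# occur, the packed keys are re-factorized to a dense range ("densified") and
# the remaining levels are folded in recursively.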

# ---------------------------------------------------------------------
# Concatenate DataFrame objects


def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
           keys=None, levels=None, names=None, verify_integrity=False,
           copy=True):
    """
    Concatenate pandas objects along a particular axis with optional set logic
    along the other axes. Can also add a layer of hierarchical indexing on the
    concatenation axis, which may be useful if the labels are the same (or
    overlapping) on the passed axis number

    Parameters
    ----------
    objs : a sequence or mapping of Series, DataFrame, or Panel objects
        If a dict is passed, the sorted keys will be used as the `keys`
        argument, unless it is passed, in which case the values will be
        selected (see below). Any None objects will be dropped silently unless
        they are all None in which case a ValueError will be raised
    axis : {0, 1, ...}, default 0
        The axis to concatenate along
    join : {'inner', 'outer'}, default 'outer'
        How to handle indexes on other axis(es)
    join_axes : list of Index objects
        Specific indexes to use for the other n - 1 axes instead of performing
        inner/outer set logic
    verify_integrity : boolean, default False
        Check whether the new concatenated axis contains duplicates. This can
        be very expensive relative to the actual data concatenation
    keys : sequence, default None
        If multiple levels passed, should contain tuples. Construct
        hierarchical index using the passed keys as the outermost level
    levels : list of sequences, default None
        Specific levels (unique values) to use for constructing a
        MultiIndex. Otherwise they will be inferred from the keys
    names : list, default None
        Names for the levels in the resulting hierarchical index
    ignore_index : boolean, default False
        If True, do not use the index values along the concatenation axis. The
        resulting axis will be labeled 0, ..., n - 1. This is useful if you are
        concatenating objects where the concatenation axis does not have
        meaningful indexing information. Note the index values on the other
        axes are still respected in the join.
    copy : boolean, default True
        If False, do not copy data unnecessarily

    Notes
    -----
    The keys, levels, and names arguments are all optional

    Returns
    -------
    concatenated : type of objects
    """
    op = _Concatenator(objs, axis=axis, join_axes=join_axes,
                       ignore_index=ignore_index, join=join,
                       keys=keys, levels=levels, names=names,
                       verify_integrity=verify_integrity,
                       copy=copy)
    return op.get_result()
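
# Editor's note: a minimal usage sketch for ``concat`` (added editorially;
# the frames are illustrative only).
#
# >>> df1 = DataFrame({'a': [1, 2]})
# >>> df2 = DataFrame({'a': [3, 4]})
# >>> concat([df1, df2], ignore_index=True)
#    a
# 0  1
# 1  2
# 2  3
# 3  4
# >>> concat([df1, df2], keys=['x', 'y'])  # hierarchical row index
#      a
# x 0  1
#   1  2
# y 0  3
#   1  4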


class _Concatenator(object):
    """
    Orchestrates a concatenation operation for BlockManagers
    """

    def __init__(self, objs, axis=0, join='outer', join_axes=None,
                 keys=None, levels=None, names=None,
                 ignore_index=False, verify_integrity=False, copy=True):
        if isinstance(objs, (NDFrame, compat.string_types)):
            raise TypeError('first argument must be an iterable of pandas '
                            'objects, you passed an object of type '
                            '"{0}"'.format(type(objs).__name__))

        if join == 'outer':
            self.intersect = False
        elif join == 'inner':
            self.intersect = True
        else:  # pragma: no cover
            raise ValueError('Only can inner (intersect) or outer (union) '
                             'join the other axis')

        if isinstance(objs, dict):
            if keys is None:
                keys = sorted(objs)
            objs = [objs[k] for k in keys]
        else:
            objs = list(objs)

        if len(objs) == 0:
            raise ValueError('No objects to concatenate')

        if keys is None:
            objs = [obj for obj in objs if obj is not None]
        else:
            # #1649
            clean_keys = []
            clean_objs = []
            for k, v in zip(keys, objs):
                if v is None:
                    continue
                clean_keys.append(k)
                clean_objs.append(v)
            objs = clean_objs
            keys = clean_keys

        if len(objs) == 0:
            raise ValueError('All objects passed were None')

        # consolidate data & figure out what our result ndim is going to be
        ndims = set()
        for obj in objs:
            if not isinstance
# [listing truncated by the source viewer; the statement above is cut off
#  mid-line in the original page]