PageRenderTime 41ms CodeModel.GetById 17ms RepoModel.GetById 1ms app.codeStats 0ms

/pandas/io/sql.py

http://github.com/pydata/pandas
Python | 2241 lines | 1916 code | 77 blank | 248 comment | 114 complexity | ba583a4c90e470c29bc01db95efe02ad MD5 | raw file
Possible License(s): BSD-3-Clause, Apache-2.0
  1. """
  2. Collection of query wrappers / abstractions to both facilitate data
  3. retrieval and to reduce dependency on DB-specific API.
  4. """
  5. from __future__ import annotations
  6. from contextlib import contextmanager
  7. from datetime import (
  8. date,
  9. datetime,
  10. time,
  11. )
  12. from functools import partial
  13. import re
  14. from typing import (
  15. Any,
  16. Iterator,
  17. Sequence,
  18. cast,
  19. overload,
  20. )
  21. import warnings
  22. import numpy as np
  23. import pandas._libs.lib as lib
  24. from pandas._typing import DtypeArg
  25. from pandas.compat._optional import import_optional_dependency
  26. from pandas.errors import AbstractMethodError
  27. from pandas.core.dtypes.common import (
  28. is_datetime64tz_dtype,
  29. is_dict_like,
  30. is_list_like,
  31. )
  32. from pandas.core.dtypes.dtypes import DatetimeTZDtype
  33. from pandas.core.dtypes.missing import isna
  34. from pandas import get_option
  35. from pandas.core.api import (
  36. DataFrame,
  37. Series,
  38. )
  39. from pandas.core.base import PandasObject
  40. from pandas.core.tools.datetimes import to_datetime
  41. from pandas.util.version import Version
class DatabaseError(OSError):
    """Exception raised by this module to signal database errors.

    Subclasses OSError so existing callers catching OSError keep working.
    """

    pass
  44. # -----------------------------------------------------------------------------
  45. # -- Helper functions
  46. def _gt14() -> bool:
  47. """
  48. Check if sqlalchemy.__version__ is at least 1.4.0, when several
  49. deprecations were made.
  50. """
  51. import sqlalchemy
  52. return Version(sqlalchemy.__version__) >= Version("1.4.0")
  53. def _convert_params(sql, params):
  54. """Convert SQL and params args to DBAPI2.0 compliant format."""
  55. args = [sql]
  56. if params is not None:
  57. if hasattr(params, "keys"): # test if params is a mapping
  58. args += [params]
  59. else:
  60. args += [list(params)]
  61. return args
  62. def _process_parse_dates_argument(parse_dates):
  63. """Process parse_dates argument for read_sql functions"""
  64. # handle non-list entries for parse_dates gracefully
  65. if parse_dates is True or parse_dates is None or parse_dates is False:
  66. parse_dates = []
  67. elif not hasattr(parse_dates, "__iter__"):
  68. parse_dates = [parse_dates]
  69. return parse_dates
  70. def _handle_date_column(
  71. col, utc: bool | None = None, format: str | dict[str, Any] | None = None
  72. ):
  73. if isinstance(format, dict):
  74. # GH35185 Allow custom error values in parse_dates argument of
  75. # read_sql like functions.
  76. # Format can take on custom to_datetime argument values such as
  77. # {"errors": "coerce"} or {"dayfirst": True}
  78. error = format.pop("errors", None) or "ignore"
  79. return to_datetime(col, errors=error, **format)
  80. else:
  81. # Allow passing of formatting string for integers
  82. # GH17855
  83. if format is None and (
  84. issubclass(col.dtype.type, np.floating)
  85. or issubclass(col.dtype.type, np.integer)
  86. ):
  87. format = "s"
  88. if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]:
  89. return to_datetime(col, errors="coerce", unit=format, utc=utc)
  90. elif is_datetime64tz_dtype(col.dtype):
  91. # coerce to UTC timezone
  92. # GH11216
  93. return to_datetime(col, utc=True)
  94. else:
  95. return to_datetime(col, errors="coerce", format=format, utc=utc)
  96. def _parse_date_columns(data_frame, parse_dates):
  97. """
  98. Force non-datetime columns to be read as such.
  99. Supports both string formatted and integer timestamp columns.
  100. """
  101. parse_dates = _process_parse_dates_argument(parse_dates)
  102. # we want to coerce datetime64_tz dtypes for now to UTC
  103. # we could in theory do a 'nice' conversion from a FixedOffset tz
  104. # GH11216
  105. for col_name, df_col in data_frame.items():
  106. if is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates:
  107. try:
  108. fmt = parse_dates[col_name]
  109. except TypeError:
  110. fmt = None
  111. data_frame[col_name] = _handle_date_column(df_col, format=fmt)
  112. return data_frame
  113. def _wrap_result(
  114. data,
  115. columns,
  116. index_col=None,
  117. coerce_float: bool = True,
  118. parse_dates=None,
  119. dtype: DtypeArg | None = None,
  120. ):
  121. """Wrap result set of query in a DataFrame."""
  122. frame = DataFrame.from_records(data, columns=columns, coerce_float=coerce_float)
  123. if dtype:
  124. frame = frame.astype(dtype)
  125. frame = _parse_date_columns(frame, parse_dates)
  126. if index_col is not None:
  127. frame.set_index(index_col, inplace=True)
  128. return frame
  129. def execute(sql, con, params=None):
  130. """
  131. Execute the given SQL query using the provided connection object.
  132. Parameters
  133. ----------
  134. sql : string
  135. SQL query to be executed.
  136. con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
  137. Using SQLAlchemy makes it possible to use any DB supported by the
  138. library.
  139. If a DBAPI2 object, only sqlite3 is supported.
  140. params : list or tuple, optional, default: None
  141. List of parameters to pass to execute method.
  142. Returns
  143. -------
  144. Results Iterable
  145. """
  146. pandas_sql = pandasSQL_builder(con)
  147. args = _convert_params(sql, params)
  148. return pandas_sql.execute(*args)
  149. # -----------------------------------------------------------------------------
  150. # -- Read and write to DataFrames
# Typing overloads: chunksize=None returns a single DataFrame, while an
# integer chunksize makes read_sql_table yield an iterator of DataFrames.
@overload
def read_sql_table(
    table_name,
    con,
    schema=None,
    index_col=None,
    coerce_float=True,
    parse_dates=None,
    columns=None,
    chunksize: None = None,
) -> DataFrame:
    ...


@overload
def read_sql_table(
    table_name,
    con,
    schema=None,
    index_col=None,
    coerce_float=True,
    parse_dates=None,
    columns=None,
    chunksize: int = 1,
) -> Iterator[DataFrame]:
    ...
  175. def read_sql_table(
  176. table_name: str,
  177. con,
  178. schema: str | None = None,
  179. index_col: str | Sequence[str] | None = None,
  180. coerce_float: bool = True,
  181. parse_dates=None,
  182. columns=None,
  183. chunksize: int | None = None,
  184. ) -> DataFrame | Iterator[DataFrame]:
  185. """
  186. Read SQL database table into a DataFrame.
  187. Given a table name and a SQLAlchemy connectable, returns a DataFrame.
  188. This function does not support DBAPI connections.
  189. Parameters
  190. ----------
  191. table_name : str
  192. Name of SQL table in database.
  193. con : SQLAlchemy connectable or str
  194. A database URI could be provided as str.
  195. SQLite DBAPI connection mode not supported.
  196. schema : str, default None
  197. Name of SQL schema in database to query (if database flavor
  198. supports this). Uses default schema if None (default).
  199. index_col : str or list of str, optional, default: None
  200. Column(s) to set as index(MultiIndex).
  201. coerce_float : bool, default True
  202. Attempts to convert values of non-string, non-numeric objects (like
  203. decimal.Decimal) to floating point. Can result in loss of Precision.
  204. parse_dates : list or dict, default None
  205. - List of column names to parse as dates.
  206. - Dict of ``{column_name: format string}`` where format string is
  207. strftime compatible in case of parsing string times or is one of
  208. (D, s, ns, ms, us) in case of parsing integer timestamps.
  209. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
  210. to the keyword arguments of :func:`pandas.to_datetime`
  211. Especially useful with databases without native Datetime support,
  212. such as SQLite.
  213. columns : list, default None
  214. List of column names to select from SQL table.
  215. chunksize : int, default None
  216. If specified, returns an iterator where `chunksize` is the number of
  217. rows to include in each chunk.
  218. Returns
  219. -------
  220. DataFrame or Iterator[DataFrame]
  221. A SQL table is returned as two-dimensional data structure with labeled
  222. axes.
  223. See Also
  224. --------
  225. read_sql_query : Read SQL query into a DataFrame.
  226. read_sql : Read SQL query or database table into a DataFrame.
  227. Notes
  228. -----
  229. Any datetime values with time zone information will be converted to UTC.
  230. Examples
  231. --------
  232. >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
  233. """
  234. pandas_sql = pandasSQL_builder(con, schema=schema)
  235. if not pandas_sql.has_table(table_name):
  236. raise ValueError(f"Table {table_name} not found")
  237. table = pandas_sql.read_table(
  238. table_name,
  239. index_col=index_col,
  240. coerce_float=coerce_float,
  241. parse_dates=parse_dates,
  242. columns=columns,
  243. chunksize=chunksize,
  244. )
  245. if table is not None:
  246. return table
  247. else:
  248. raise ValueError(f"Table {table_name} not found", con)
# Typing overloads: chunksize=None returns a single DataFrame, while an
# integer chunksize makes read_sql_query yield an iterator of DataFrames.
@overload
def read_sql_query(
    sql,
    con,
    index_col=None,
    coerce_float=True,
    params=None,
    parse_dates=None,
    chunksize: None = None,
    dtype: DtypeArg | None = None,
) -> DataFrame:
    ...


@overload
def read_sql_query(
    sql,
    con,
    index_col=None,
    coerce_float=True,
    params=None,
    parse_dates=None,
    chunksize: int = 1,
    dtype: DtypeArg | None = None,
) -> Iterator[DataFrame]:
    ...
def read_sql_query(
    sql,
    con,
    index_col=None,
    coerce_float: bool = True,
    params=None,
    parse_dates=None,
    chunksize: int | None = None,
    dtype: DtypeArg | None = None,
) -> DataFrame | Iterator[DataFrame]:
    """
    Read SQL query into a DataFrame.

    Returns a DataFrame corresponding to the result set of the query
    string. Optionally provide an `index_col` parameter to use one of the
    columns as the index, otherwise default integer index will be used.

    Parameters
    ----------
    sql : str SQL query or SQLAlchemy Selectable (select or text object)
        SQL query to be executed.
    con : SQLAlchemy connectable, str, or sqlite3 connection
        Using SQLAlchemy makes it possible to use any DB supported by that
        library. If a DBAPI2 object, only sqlite3 is supported.
    index_col : str or list of str, optional, default: None
        Column(s) to set as index(MultiIndex).
    coerce_float : bool, default True
        Attempts to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Useful for SQL result sets.
    params : list, tuple or dict, optional, default: None
        List of parameters to pass to execute method. The syntax used
        to pass parameters is database driver dependent. Check your
        database driver documentation for which of the five syntax styles,
        described in PEP 249's paramstyle, is supported.
        Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
    parse_dates : list or dict, default: None
        - List of column names to parse as dates.
        - Dict of ``{column_name: format string}`` where format string is
          strftime compatible in case of parsing string times, or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps.
        - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
          to the keyword arguments of :func:`pandas.to_datetime`
          Especially useful with databases without native Datetime support,
          such as SQLite.
    chunksize : int, default None
        If specified, return an iterator where `chunksize` is the number of
        rows to include in each chunk.
    dtype : Type name or dict of columns
        Data type for data or columns. E.g. np.float64 or
        {'a': np.float64, 'b': np.int32, 'c': 'Int64'}

        .. versionadded:: 1.3.0

    Returns
    -------
    DataFrame or Iterator[DataFrame]

    See Also
    --------
    read_sql_table : Read SQL database table into a DataFrame.
    read_sql : Read SQL query or database table into a DataFrame.

    Notes
    -----
    Any datetime values with time zone information parsed via the `parse_dates`
    parameter will be converted to UTC.
    """
    pandas_sql = pandasSQL_builder(con)
    return pandas_sql.read_query(
        sql,
        index_col=index_col,
        params=params,
        coerce_float=coerce_float,
        parse_dates=parse_dates,
        chunksize=chunksize,
        dtype=dtype,
    )
# Typing overloads: chunksize=None returns a single DataFrame, while an
# integer chunksize makes read_sql yield an iterator of DataFrames.
@overload
def read_sql(
    sql,
    con,
    index_col=None,
    coerce_float=True,
    params=None,
    parse_dates=None,
    columns=None,
    chunksize: None = None,
) -> DataFrame:
    ...


@overload
def read_sql(
    sql,
    con,
    index_col=None,
    coerce_float=True,
    params=None,
    parse_dates=None,
    columns=None,
    chunksize: int = 1,
) -> Iterator[DataFrame]:
    ...
  368. def read_sql(
  369. sql,
  370. con,
  371. index_col: str | Sequence[str] | None = None,
  372. coerce_float: bool = True,
  373. params=None,
  374. parse_dates=None,
  375. columns=None,
  376. chunksize: int | None = None,
  377. ) -> DataFrame | Iterator[DataFrame]:
  378. """
  379. Read SQL query or database table into a DataFrame.
  380. This function is a convenience wrapper around ``read_sql_table`` and
  381. ``read_sql_query`` (for backward compatibility). It will delegate
  382. to the specific function depending on the provided input. A SQL query
  383. will be routed to ``read_sql_query``, while a database table name will
  384. be routed to ``read_sql_table``. Note that the delegated function might
  385. have more specific notes about their functionality not listed here.
  386. Parameters
  387. ----------
  388. sql : str or SQLAlchemy Selectable (select or text object)
  389. SQL query to be executed or a table name.
  390. con : SQLAlchemy connectable, str, or sqlite3 connection
  391. Using SQLAlchemy makes it possible to use any DB supported by that
  392. library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
  393. for engine disposal and connection closure for the SQLAlchemy connectable; str
  394. connections are closed automatically. See
  395. `here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
  396. index_col : str or list of str, optional, default: None
  397. Column(s) to set as index(MultiIndex).
  398. coerce_float : bool, default True
  399. Attempts to convert values of non-string, non-numeric objects (like
  400. decimal.Decimal) to floating point, useful for SQL result sets.
  401. params : list, tuple or dict, optional, default: None
  402. List of parameters to pass to execute method. The syntax used
  403. to pass parameters is database driver dependent. Check your
  404. database driver documentation for which of the five syntax styles,
  405. described in PEP 249's paramstyle, is supported.
  406. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
  407. parse_dates : list or dict, default: None
  408. - List of column names to parse as dates.
  409. - Dict of ``{column_name: format string}`` where format string is
  410. strftime compatible in case of parsing string times, or is one of
  411. (D, s, ns, ms, us) in case of parsing integer timestamps.
  412. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
  413. to the keyword arguments of :func:`pandas.to_datetime`
  414. Especially useful with databases without native Datetime support,
  415. such as SQLite.
  416. columns : list, default: None
  417. List of column names to select from SQL table (only used when reading
  418. a table).
  419. chunksize : int, default None
  420. If specified, return an iterator where `chunksize` is the
  421. number of rows to include in each chunk.
  422. Returns
  423. -------
  424. DataFrame or Iterator[DataFrame]
  425. See Also
  426. --------
  427. read_sql_table : Read SQL database table into a DataFrame.
  428. read_sql_query : Read SQL query into a DataFrame.
  429. Examples
  430. --------
  431. Read data from SQL via either a SQL query or a SQL tablename.
  432. When using a SQLite database only SQL queries are accepted,
  433. providing only the SQL tablename will result in an error.
  434. >>> from sqlite3 import connect
  435. >>> conn = connect(':memory:')
  436. >>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']],
  437. ... columns=['int_column', 'date_column'])
  438. >>> df.to_sql('test_data', conn)
  439. >>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn)
  440. int_column date_column
  441. 0 0 10/11/12
  442. 1 1 12/11/10
  443. >>> pd.read_sql('test_data', 'postgres:///db_name') # doctest:+SKIP
  444. Apply date parsing to columns through the ``parse_dates`` argument
  445. >>> pd.read_sql('SELECT int_column, date_column FROM test_data',
  446. ... conn,
  447. ... parse_dates=["date_column"])
  448. int_column date_column
  449. 0 0 2012-10-11
  450. 1 1 2010-12-11
  451. The ``parse_dates`` argument calls ``pd.to_datetime`` on the provided columns.
  452. Custom argument values for applying ``pd.to_datetime`` on a column are specified
  453. via a dictionary format:
  454. 1. Ignore errors while parsing the values of "date_column"
  455. >>> pd.read_sql('SELECT int_column, date_column FROM test_data',
  456. ... conn,
  457. ... parse_dates={"date_column": {"errors": "ignore"}})
  458. int_column date_column
  459. 0 0 2012-10-11
  460. 1 1 2010-12-11
  461. 2. Apply a dayfirst date parsing order on the values of "date_column"
  462. >>> pd.read_sql('SELECT int_column, date_column FROM test_data',
  463. ... conn,
  464. ... parse_dates={"date_column": {"dayfirst": True}})
  465. int_column date_column
  466. 0 0 2012-11-10
  467. 1 1 2010-11-12
  468. 3. Apply custom formatting when date parsing the values of "date_column"
  469. >>> pd.read_sql('SELECT int_column, date_column FROM test_data',
  470. ... conn,
  471. ... parse_dates={"date_column": {"format": "%d/%m/%y"}})
  472. int_column date_column
  473. 0 0 2012-11-10
  474. 1 1 2010-11-12
  475. """
  476. pandas_sql = pandasSQL_builder(con)
  477. if isinstance(pandas_sql, SQLiteDatabase):
  478. return pandas_sql.read_query(
  479. sql,
  480. index_col=index_col,
  481. params=params,
  482. coerce_float=coerce_float,
  483. parse_dates=parse_dates,
  484. chunksize=chunksize,
  485. )
  486. try:
  487. _is_table_name = pandas_sql.has_table(sql)
  488. except Exception:
  489. # using generic exception to catch errors from sql drivers (GH24988)
  490. _is_table_name = False
  491. if _is_table_name:
  492. pandas_sql.meta.reflect(bind=pandas_sql.connectable, only=[sql])
  493. return pandas_sql.read_table(
  494. sql,
  495. index_col=index_col,
  496. coerce_float=coerce_float,
  497. parse_dates=parse_dates,
  498. columns=columns,
  499. chunksize=chunksize,
  500. )
  501. else:
  502. return pandas_sql.read_query(
  503. sql,
  504. index_col=index_col,
  505. params=params,
  506. coerce_float=coerce_float,
  507. parse_dates=parse_dates,
  508. chunksize=chunksize,
  509. )
def to_sql(
    frame,
    name: str,
    con,
    schema: str | None = None,
    if_exists: str = "fail",
    index: bool = True,
    index_label=None,
    chunksize: int | None = None,
    dtype: DtypeArg | None = None,
    method: str | None = None,
    engine: str = "auto",
    **engine_kwargs,
) -> None:
    """
    Write records stored in a DataFrame to a SQL database.

    Parameters
    ----------
    frame : DataFrame, Series
    name : str
        Name of SQL table.
    con : SQLAlchemy connectable(engine/connection) or database string URI
        or sqlite3 DBAPI2 connection
        Using SQLAlchemy makes it possible to use any DB supported by that
        library.
        If a DBAPI2 object, only sqlite3 is supported.
    schema : str, optional
        Name of SQL schema in database to write to (if database flavor
        supports this). If None, use default schema (default).
    if_exists : {'fail', 'replace', 'append'}, default 'fail'
        - fail: If table exists, raise a ValueError (see ``SQLTable.create``).
        - replace: If table exists, drop it, recreate it, and insert data.
        - append: If table exists, insert data. Create if does not exist.
    index : bool, default True
        Write DataFrame index as a column.
    index_label : str or sequence, optional
        Column label for index column(s). If None is given (default) and
        `index` is True, then the index names are used.
        A sequence should be given if the DataFrame uses MultiIndex.
    chunksize : int, optional
        Specify the number of rows in each batch to be written at a time.
        By default, all rows will be written at once.
    dtype : dict or scalar, optional
        Specifying the datatype for columns. If a dictionary is used, the
        keys should be the column names and the values should be the
        SQLAlchemy types or strings for the sqlite3 fallback mode. If a
        scalar is provided, it will be applied to all columns.
    method : {None, 'multi', callable}, optional
        Controls the SQL insertion clause used:

        - None : Uses standard SQL ``INSERT`` clause (one per row).
        - 'multi': Pass multiple values in a single ``INSERT`` clause.
        - callable with signature ``(pd_table, conn, keys, data_iter)``.

        Details and a sample callable implementation can be found in the
        section :ref:`insert method <io.sql.method>`.
    engine : {'auto', 'sqlalchemy'}, default 'auto'
        SQL engine library to use. If 'auto', then the option
        ``io.sql.engine`` is used. The default ``io.sql.engine``
        behavior is 'sqlalchemy'

        .. versionadded:: 1.3.0

    **engine_kwargs
        Any additional kwargs are passed to the engine.

    Raises
    ------
    ValueError
        When `if_exists` is not one of 'fail', 'replace' or 'append'.
    NotImplementedError
        When `frame` is neither a Series nor a DataFrame.
    """
    if if_exists not in ("fail", "replace", "append"):
        raise ValueError(f"'{if_exists}' is not valid for if_exists")

    pandas_sql = pandasSQL_builder(con, schema=schema)

    if isinstance(frame, Series):
        # Normalize to a one-column DataFrame so a single write path suffices.
        frame = frame.to_frame()
    elif not isinstance(frame, DataFrame):
        raise NotImplementedError(
            "'frame' argument should be either a Series or a DataFrame"
        )

    pandas_sql.to_sql(
        frame,
        name,
        if_exists=if_exists,
        index=index,
        index_label=index_label,
        schema=schema,
        chunksize=chunksize,
        dtype=dtype,
        method=method,
        engine=engine,
        **engine_kwargs,
    )
  594. def has_table(table_name: str, con, schema: str | None = None):
  595. """
  596. Check if DataBase has named table.
  597. Parameters
  598. ----------
  599. table_name: string
  600. Name of SQL table.
  601. con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
  602. Using SQLAlchemy makes it possible to use any DB supported by that
  603. library.
  604. If a DBAPI2 object, only sqlite3 is supported.
  605. schema : string, default None
  606. Name of SQL schema in database to write to (if database flavor supports
  607. this). If None, use default schema (default).
  608. Returns
  609. -------
  610. boolean
  611. """
  612. pandas_sql = pandasSQL_builder(con, schema=schema)
  613. return pandas_sql.has_table(table_name)
  614. table_exists = has_table
  615. def pandasSQL_builder(con, schema: str | None = None):
  616. """
  617. Convenience function to return the correct PandasSQL subclass based on the
  618. provided parameters.
  619. """
  620. import sqlite3
  621. if isinstance(con, sqlite3.Connection) or con is None:
  622. return SQLiteDatabase(con)
  623. sqlalchemy = import_optional_dependency("sqlalchemy")
  624. if isinstance(con, str):
  625. con = sqlalchemy.create_engine(con)
  626. if isinstance(con, sqlalchemy.engine.Connectable):
  627. return SQLDatabase(con, schema=schema)
  628. raise ValueError(
  629. "pandas only support SQLAlchemy connectable(engine/connection) or"
  630. "database string URI or sqlite3 DBAPI2 connection"
  631. )
class SQLTable(PandasObject):
    """
    For mapping Pandas tables to SQL tables.
    Uses fact that table is reflected by SQLAlchemy to
    do better type conversions.
    Also holds various flags needed to avoid having to
    pass them between functions all the time.
    """

    # TODO: support for multiIndex

    def __init__(
        self,
        name: str,
        pandas_sql_engine,
        frame=None,
        index=True,
        if_exists="fail",
        prefix="pandas",
        index_label=None,
        schema=None,
        keys=None,
        dtype: DtypeArg | None = None,
    ):
        self.name = name
        # The owning PandasSQL wrapper; supplies get_table/has_table/
        # run_transaction/connectable used throughout this class.
        self.pd_sql = pandas_sql_engine
        self.prefix = prefix
        self.frame = frame
        # Resolved list of index column label(s), or None (see _index_name).
        self.index = self._index_name(index, index_label)
        self.schema = schema
        self.if_exists = if_exists
        self.keys = keys
        self.dtype = dtype

        if frame is not None:
            # We want to initialize based on a dataframe
            self.table = self._create_table_setup()
        else:
            # no data provided, read-only mode
            self.table = self.pd_sql.get_table(self.name, self.schema)

        if self.table is None:
            raise ValueError(f"Could not init table '{name}'")
    def exists(self):
        # True when a table named self.name exists under self.schema.
        return self.pd_sql.has_table(self.name, self.schema)
    def sql_schema(self):
        """Return the CREATE TABLE statement for this table as a string,
        compiled for the current connectable's SQL dialect."""
        from sqlalchemy.schema import CreateTable

        return str(CreateTable(self.table).compile(self.pd_sql.connectable))
  676. def _execute_create(self):
  677. # Inserting table into database, add to MetaData object
  678. if _gt14():
  679. self.table = self.table.to_metadata(self.pd_sql.meta)
  680. else:
  681. self.table = self.table.tometadata(self.pd_sql.meta)
  682. self.table.create(bind=self.pd_sql.connectable)
  683. def create(self):
  684. if self.exists():
  685. if self.if_exists == "fail":
  686. raise ValueError(f"Table '{self.name}' already exists.")
  687. elif self.if_exists == "replace":
  688. self.pd_sql.drop_table(self.name, self.schema)
  689. self._execute_create()
  690. elif self.if_exists == "append":
  691. pass
  692. else:
  693. raise ValueError(f"'{self.if_exists}' is not valid for if_exists")
  694. else:
  695. self._execute_create()
    def _execute_insert(self, conn, keys: list[str], data_iter):
        """
        Execute SQL statement inserting data

        Parameters
        ----------
        conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
        keys : list of str
            Column names
        data_iter : generator of list
            Each item contains a list of values to be inserted
        """
        # Materialize rows as {column: value} dicts; passing a list of dicts
        # lets SQLAlchemy execute the insert once for all rows.
        data = [dict(zip(keys, row)) for row in data_iter]
        conn.execute(self.table.insert(), data)
    def _execute_insert_multi(self, conn, keys: list[str], data_iter):
        """
        Alternative to _execute_insert for DBs support multivalue INSERT.

        Note: multi-value insert is usually faster for analytics DBs
        and tables containing a few columns
        but performance degrades quickly with increase of columns.
        """
        from sqlalchemy import insert

        data = [dict(zip(keys, row)) for row in data_iter]
        # Builds a single INSERT ... VALUES (...), (...), ... statement.
        stmt = insert(self.table).values(data)
        conn.execute(stmt)
    def insert_data(self):
        """Split self.frame into column labels and per-column object arrays.

        Returns
        -------
        tuple of (list of str, list of numpy.ndarray)
            Column labels (index level(s) first when ``self.index`` is set)
            and the matching values as object ndarrays, with missing values
            replaced by None for the DB driver.
        """
        if self.index is not None:
            temp = self.frame.copy()
            temp.index.names = self.index
            try:
                # Move the index level(s) into regular columns for insertion.
                temp.reset_index(inplace=True)
            except ValueError as err:
                raise ValueError(f"duplicate name in index/columns: {err}") from err
        else:
            temp = self.frame

        column_names = list(map(str, temp.columns))
        ncols = len(column_names)
        data_list = [None] * ncols

        for i, (_, ser) in enumerate(temp.items()):
            vals = ser._values
            if vals.dtype.kind == "M":
                # datetime64 values -> datetime.datetime objects
                d = vals.to_pydatetime()
            elif vals.dtype.kind == "m":
                # store as integers, see GH#6921, GH#7076
                d = vals.view("i8").astype(object)
            else:
                d = vals.astype(object)

            assert isinstance(d, np.ndarray), type(d)

            if ser._can_hold_na:
                # Note: this will miss timedeltas since they are converted to int
                mask = isna(d)
                d[mask] = None

            # error: No overload variant of "__setitem__" of "list" matches
            # argument types "int", "ndarray"
            data_list[i] = d  # type: ignore[call-overload]

        return column_names, data_list
  751. def insert(self, chunksize: int | None = None, method: str | None = None):
  752. # set insert method
  753. if method is None:
  754. exec_insert = self._execute_insert
  755. elif method == "multi":
  756. exec_insert = self._execute_insert_multi
  757. elif callable(method):
  758. exec_insert = partial(method, self)
  759. else:
  760. raise ValueError(f"Invalid parameter `method`: {method}")
  761. keys, data_list = self.insert_data()
  762. nrows = len(self.frame)
  763. if nrows == 0:
  764. return
  765. if chunksize is None:
  766. chunksize = nrows
  767. elif chunksize == 0:
  768. raise ValueError("chunksize argument should be non-zero")
  769. chunks = (nrows // chunksize) + 1
  770. with self.pd_sql.run_transaction() as conn:
  771. for i in range(chunks):
  772. start_i = i * chunksize
  773. end_i = min((i + 1) * chunksize, nrows)
  774. if start_i >= end_i:
  775. break
  776. chunk_iter = zip(*(arr[start_i:end_i] for arr in data_list))
  777. exec_insert(conn, keys, chunk_iter)
    def _query_iterator(
        self,
        result,
        chunksize: int | None,  # was annotated str | None; it is a row count
        columns,
        coerce_float: bool = True,
        parse_dates=None,
    ):
        """Return generator through chunked result set."""
        has_read_data = False
        while True:
            data = result.fetchmany(chunksize)
            if not data:
                if not has_read_data:
                    # Empty result: still yield one empty, correctly-labeled
                    # DataFrame so consumers always receive a frame.
                    yield DataFrame.from_records(
                        [], columns=columns, coerce_float=coerce_float
                    )
                break
            else:
                has_read_data = True
                self.frame = DataFrame.from_records(
                    data, columns=columns, coerce_float=coerce_float
                )
                self._harmonize_columns(parse_dates=parse_dates)

                if self.index is not None:
                    self.frame.set_index(self.index, inplace=True)

                yield self.frame
    def read(self, coerce_float=True, parse_dates=None, columns=None, chunksize=None):
        """Read the SQL table into a DataFrame, or an iterator of chunks.

        Parameters
        ----------
        coerce_float : bool, default True
            Passed to DataFrame.from_records.
        parse_dates : list or dict, optional
            Forwarded to _harmonize_columns for datetime coercion.
        columns : list, optional
            Subset of columns to SELECT; index column(s) are always included.
        chunksize : int, optional
            If given, return a generator yielding DataFrames of this many rows.
        """
        from sqlalchemy import select

        if columns is not None and len(columns) > 0:
            cols = [self.table.c[n] for n in columns]
            if self.index is not None:
                # Prepend index columns (preserving order) so the
                # set_index below can find them.
                for idx in self.index[::-1]:
                    cols.insert(0, self.table.c[idx])
            # SQLAlchemy 1.4 changed select()'s calling convention (see _gt14).
            sql_select = select(*cols) if _gt14() else select(cols)
        else:
            sql_select = select(self.table) if _gt14() else self.table.select()

        result = self.pd_sql.execute(sql_select)
        column_names = result.keys()

        if chunksize is not None:
            return self._query_iterator(
                result,
                chunksize,
                column_names,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
            )
        else:
            data = result.fetchall()
            self.frame = DataFrame.from_records(
                data, columns=column_names, coerce_float=coerce_float
            )

            self._harmonize_columns(parse_dates=parse_dates)

            if self.index is not None:
                self.frame.set_index(self.index, inplace=True)

            return self.frame
  834. def _index_name(self, index, index_label):
  835. # for writing: index=True to include index in sql table
  836. if index is True:
  837. nlevels = self.frame.index.nlevels
  838. # if index_label is specified, set this as index name(s)
  839. if index_label is not None:
  840. if not isinstance(index_label, list):
  841. index_label = [index_label]
  842. if len(index_label) != nlevels:
  843. raise ValueError(
  844. "Length of 'index_label' should match number of "
  845. f"levels, which is {nlevels}"
  846. )
  847. else:
  848. return index_label
  849. # return the used column labels for the index columns
  850. if (
  851. nlevels == 1
  852. and "index" not in self.frame.columns
  853. and self.frame.index.name is None
  854. ):
  855. return ["index"]
  856. else:
  857. return [
  858. l if l is not None else f"level_{i}"
  859. for i, l in enumerate(self.frame.index.names)
  860. ]
  861. # for reading: index=(list of) string to specify column to set as index
  862. elif isinstance(index, str):
  863. return [index]
  864. elif isinstance(index, list):
  865. return index
  866. else:
  867. return None
  868. def _get_column_names_and_types(self, dtype_mapper):
  869. column_names_and_types = []
  870. if self.index is not None:
  871. for i, idx_label in enumerate(self.index):
  872. idx_type = dtype_mapper(self.frame.index._get_level_values(i))
  873. column_names_and_types.append((str(idx_label), idx_type, True))
  874. column_names_and_types += [
  875. (str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False)
  876. for i in range(len(self.frame.columns))
  877. ]
  878. return column_names_and_types
  879. def _create_table_setup(self):
  880. from sqlalchemy import (
  881. Column,
  882. PrimaryKeyConstraint,
  883. Table,
  884. )
  885. from sqlalchemy.schema import MetaData
  886. column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type)
  887. columns = [
  888. Column(name, typ, index=is_index)
  889. for name, typ, is_index in column_names_and_types
  890. ]
  891. if self.keys is not None:
  892. if not is_list_like(self.keys):
  893. keys = [self.keys]
  894. else:
  895. keys = self.keys
  896. pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk")
  897. columns.append(pkc)
  898. schema = self.schema or self.pd_sql.meta.schema
  899. # At this point, attach to new metadata, only attach to self.meta
  900. # once table is created.
  901. meta = MetaData()
  902. return Table(self.name, meta, *columns, schema=schema)
    def _harmonize_columns(self, parse_dates=None):
        """
        Make the DataFrame's column types align with the SQL table
        column types.

        Need to work around limited NA value support. Floats are always
        fine, ints must always be floats if there are Null values.
        Booleans are hard because converting bool column with None replaces
        all Nones with false. Therefore only convert bool if there are no
        NA values.
        Datetimes should already be converted to np.datetime64 if supported,
        but here we also force conversion if required.
        """
        parse_dates = _process_parse_dates_argument(parse_dates)
        for sql_col in self.table.columns:
            col_name = sql_col.name
            try:
                df_col = self.frame[col_name]
                # Handle date parsing upfront; don't try to convert columns
                # twice
                if col_name in parse_dates:
                    try:
                        # parse_dates as a dict maps column -> format;
                        # a plain list raises TypeError here, meaning
                        # "parse with the default format".
                        fmt = parse_dates[col_name]
                    except TypeError:
                        fmt = None
                    self.frame[col_name] = _handle_date_column(df_col, format=fmt)
                    continue
                # the type the dataframe column should have
                col_type = self._get_dtype(sql_col.type)
                if (
                    col_type is datetime
                    or col_type is date
                    or col_type is DatetimeTZDtype
                ):
                    # Convert tz-aware Datetime SQL columns to UTC
                    utc = col_type is DatetimeTZDtype
                    self.frame[col_name] = _handle_date_column(df_col, utc=utc)
                elif col_type is float:
                    # floats support NA, can always convert!
                    self.frame[col_name] = df_col.astype(col_type, copy=False)
                elif len(df_col) == df_col.count():
                    # No NA values, can convert ints and bools
                    if col_type is np.dtype("int64") or col_type is bool:
                        self.frame[col_name] = df_col.astype(col_type, copy=False)
            except KeyError:
                pass  # this column not in results
  948. def _sqlalchemy_type(self, col):
  949. dtype: DtypeArg = self.dtype or {}
  950. if is_dict_like(dtype):
  951. dtype = cast(dict, dtype)
  952. if col.name in dtype:
  953. return dtype[col.name]
  954. # Infer type of column, while ignoring missing values.
  955. # Needed for inserting typed data containing NULLs, GH 8778.
  956. col_type = lib.infer_dtype(col, skipna=True)
  957. from sqlalchemy.types import (
  958. TIMESTAMP,
  959. BigInteger,
  960. Boolean,
  961. Date,
  962. DateTime,
  963. Float,
  964. Integer,
  965. SmallInteger,
  966. Text,
  967. Time,
  968. )
  969. if col_type == "datetime64" or col_type == "datetime":
  970. # GH 9086: TIMESTAMP is the suggested type if the column contains
  971. # timezone information
  972. try:
  973. if col.dt.tz is not None:
  974. return TIMESTAMP(timezone=True)
  975. except AttributeError:
  976. # The column is actually a DatetimeIndex
  977. # GH 26761 or an Index with date-like data e.g. 9999-01-01
  978. if getattr(col, "tz", None) is not None:
  979. return TIMESTAMP(timezone=True)
  980. return DateTime
  981. if col_type == "timedelta64":
  982. warnings.warn(
  983. "the 'timedelta' type is not supported, and will be "
  984. "written as integer values (ns frequency) to the database.",
  985. UserWarning,
  986. stacklevel=8,
  987. )
  988. return BigInteger
  989. elif col_type == "floating":
  990. if col.dtype == "float32":
  991. return Float(precision=23)
  992. else:
  993. return Float(precision=53)
  994. elif col_type == "integer":
  995. # GH35076 Map pandas integer to optimal SQLAlchemy integer type
  996. if col.dtype.name.lower() in ("int8", "uint8", "int16"):
  997. return SmallInteger
  998. elif col.dtype.name.lower() in ("uint16", "int32"):
  999. return Integer
  1000. elif col.dtype.name.lower() == "uint64":
  1001. raise ValueError("Unsigned 64 bit integer datatype is not supported")
  1002. else:
  1003. return BigInteger
  1004. elif col_type == "boolean":
  1005. return Boolean
  1006. elif col_type == "date":
  1007. return Date
  1008. elif col_type == "time":
  1009. return Time
  1010. elif col_type == "complex":
  1011. raise ValueError("Complex datatypes not supported")
  1012. return Text
  1013. def _get_dtype(self, sqltype):
  1014. from sqlalchemy.types import (
  1015. TIMESTAMP,
  1016. Boolean,
  1017. Date,
  1018. DateTime,
  1019. Float,
  1020. Integer,
  1021. )
  1022. if isinstance(sqltype, Float):
  1023. return float
  1024. elif isinstance(sqltype, Integer):
  1025. # TODO: Refine integer size.
  1026. return np.dtype("int64")
  1027. elif isinstance(sqltype, TIMESTAMP):
  1028. # we have a timezone capable type
  1029. if not sqltype.timezone:
  1030. return datetime
  1031. return DatetimeTZDtype
  1032. elif isinstance(sqltype, DateTime):
  1033. # Caution: np.datetime64 is also a subclass of np.number.
  1034. return datetime
  1035. elif isinstance(sqltype, Date):
  1036. return date
  1037. elif isinstance(sqltype, Boolean):
  1038. return bool
  1039. return object
  1040. class PandasSQL(PandasObject):
  1041. """
  1042. Subclasses Should define read_sql and to_sql.
  1043. """
  1044. def read_sql(self, *args, **kwargs):
  1045. raise ValueError(
  1046. "PandasSQL must be created with an SQLAlchemy "
  1047. "connectable or sqlite connection"
  1048. )
  1049. def to_sql(
  1050. self,
  1051. frame,
  1052. name,
  1053. if_exists="fail",
  1054. index=True,
  1055. index_label=None,
  1056. schema=None,
  1057. chunksize=None,
  1058. dtype: DtypeArg | None = None,
  1059. method=None,
  1060. ):
  1061. raise ValueError(
  1062. "PandasSQL must be created with an SQLAlchemy "
  1063. "connectable or sqlite connection"
  1064. )
class BaseEngine:
    # Strategy interface: concrete engines (e.g. SQLAlchemyEngine)
    # implement the actual row insertion for an already-created table.
    def insert_records(
        self,
        table: SQLTable,
        con,
        frame,
        name,
        index=True,
        schema=None,
        chunksize=None,
        method=None,
        **engine_kwargs,
    ):
        """
        Inserts data into already-prepared table

        Parameters
        ----------
        table : SQLTable
            Table wrapper holding the target frame and metadata.
        con : connectable
            Database connection or engine.
        frame : DataFrame
            Data to be written.
        name : str
            Name of the target SQL table.
        index : bool, default True
            Whether the frame's index is written as column(s).
        schema : str, optional
            Target schema, if the database flavor supports schemas.
        chunksize : int, optional
            Number of rows written per batch.
        method : {None, 'multi', callable}, optional
            Insertion strategy (see ``SQLDatabase.to_sql``).
        **engine_kwargs
            Engine-specific options.

        Raises
        ------
        AbstractMethodError
            Always; subclasses must override this method.
        """
        raise AbstractMethodError(self)
  1082. class SQLAlchemyEngine(BaseEngine):
  1083. def __init__(self):
  1084. import_optional_dependency(
  1085. "sqlalchemy", extra="sqlalchemy is required for SQL support."
  1086. )
  1087. def insert_records(
  1088. self,
  1089. table: SQLTable,
  1090. con,
  1091. frame,
  1092. name,
  1093. index=True,
  1094. schema=None,
  1095. chunksize=None,
  1096. method=None,
  1097. **engine_kwargs,
  1098. ):
  1099. from sqlalchemy import exc
  1100. try:
  1101. table.insert(chunksize=chunksize, method=method)
  1102. except exc.SQLAlchemyError as err:
  1103. # GH34431
  1104. # https://stackoverflow.com/a/67358288/6067848
  1105. msg = r"""(\(1054, "Unknown column 'inf(e0)?' in 'field list'"\))(?#
  1106. )|inf can not be used with MySQL"""
  1107. err_text = str(err.orig)
  1108. if re.search(msg, err_text):
  1109. raise ValueError("inf cannot be used with MySQL") from err
  1110. else:
  1111. raise err
  1112. def get_engine(engine: str) -> BaseEngine:
  1113. """return our implementation"""
  1114. if engine == "auto":
  1115. engine = get_option("io.sql.engine")
  1116. if engine == "auto":
  1117. # try engines in this order
  1118. engine_classes = [SQLAlchemyEngine]
  1119. error_msgs = ""
  1120. for engine_class in engine_classes:
  1121. try:
  1122. return engine_class()
  1123. except ImportError as err:
  1124. error_msgs += "\n - " + str(err)
  1125. raise ImportError(
  1126. "Unable to find a usable engine; "
  1127. "tried using: 'sqlalchemy'.\n"
  1128. "A suitable version of "
  1129. "sqlalchemy is required for sql I/O "
  1130. "support.\n"
  1131. "Trying to import the above resulted in these errors:"
  1132. f"{error_msgs}"
  1133. )
  1134. elif engine == "sqlalchemy":
  1135. return SQLAlchemyEngine()
  1136. raise ValueError("engine must be one of 'auto', 'sqlalchemy'")
class SQLDatabase(PandasSQL):
    """
    This class enables conversion between DataFrame and SQL databases
    using SQLAlchemy to handle DataBase abstraction.

    Parameters
    ----------
    engine : SQLAlchemy connectable
        Connectable to connect with the database. Using SQLAlchemy makes it
        possible to use any DB supported by that library.
    schema : string, default None
        Name of SQL schema in database to write to (if database flavor
        supports this). If None, use default schema (default).
    """

    def __init__(self, engine, schema: str | None = None):
        from sqlalchemy.schema import MetaData

        self.connectable = engine
        # Reflect the existing database schema up front so that
        # ``tables``/``get_table`` can resolve already-existing tables.
        self.meta = MetaData(schema=schema)
        self.meta.reflect(bind=engine)

    @contextmanager
    def run_transaction(self):
        # An Engine needs an explicit connection and transaction; any other
        # connectable (e.g. an existing Connection) is yielded as-is and is
        # assumed to manage its own transaction scope.
        from sqlalchemy.engine import Engine

        if isinstance(self.connectable, Engine):
            with self.connectable.connect() as conn:
                with conn.begin():
                    yield conn
        else:
            yield self.connectable

    def execute(self, *args, **kwargs):
        """Simple passthrough to SQLAlchemy connectable"""
        return self.connectable.execution_options().execute(*args, **kwargs)

    def read_table(
        self,
        table_name: str,
        index_col: str | Sequence[str] | None = None,
        coerce_float: bool = True,
        parse_dates=None,
        columns=None,
        schema: str | None = None,
        chunksize: int | None = None,
    ):
        """
        Read SQL database table into a DataFrame.

        Parameters
        ----------
        table_name : str
            Name of SQL table in database.
        index_col : string, optional, default: None
            Column to set as index.
        coerce_float : bool, default True
            Attempts to convert values of non-string, non-numeric objects
            (like decimal.Decimal) to floating point. This can result in
            loss of precision.
        parse_dates : list or dict, default: None
            - List of column names to parse as dates.
            - Dict of ``{column_name: format string}`` where format string is
              strftime compatible in case of parsing string times, or is one of
              (D, s, ns, ms, us) in case of parsing integer timestamps.
            - Dict of ``{column_name: arg}``, where the arg corresponds
              to the keyword arguments of :func:`pandas.to_datetime`.
              Especially useful with databases without native Datetime support,
              such as SQLite.
        columns : list, default: None
            List of column names to select from SQL table.
        schema : string, default None
            Name of SQL schema in database to query (if database flavor
            supports this). If specified, this overwrites the default
            schema of the SQL database object.
        chunksize : int, default None
            If specified, return an iterator where `chunksize` is the number
            of rows to include in each chunk.

        Returns
        -------
        DataFrame

        See Also
        --------
        pandas.read_sql_table
        SQLDatabase.read_query
        """
        table = SQLTable(table_name, self, index=index_col, schema=schema)
        return table.read(
            coerce_float=coerce_float,
            parse_dates=parse_dates,
            columns=columns,
            chunksize=chunksize,
        )

    @staticmethod
    def _query_iterator(
        result,
        chunksize: int,
        columns,
        index_col=None,
        coerce_float=True,
        parse_dates=None,
        dtype: DtypeArg | None = None,
    ):
        """Return generator through chunked result set"""
        has_read_data = False
        while True:
            data = result.fetchmany(chunksize)
            if not data:
                if not has_read_data:
                    # Guarantee at least one (empty) DataFrame is yielded.
                    yield _wrap_result(
                        [],
                        columns,
                        index_col=index_col,
                        coerce_float=coerce_float,
                        parse_dates=parse_dates,
                    )
                break
            else:
                has_read_data = True
                yield _wrap_result(
                    data,
                    columns,
                    index_col=index_col,
                    coerce_float=coerce_float,
                    parse_dates=parse_dates,
                    dtype=dtype,
                )

    def read_query(
        self,
        sql: str,
        index_col: str | None = None,
        coerce_float: bool = True,
        parse_dates=None,
        params=None,
        chunksize: int | None = None,
        dtype: DtypeArg | None = None,
    ):
        """
        Read SQL query into a DataFrame.

        Parameters
        ----------
        sql : str
            SQL query to be executed.
        index_col : string, optional, default: None
            Column name to use as index for the returned DataFrame object.
        coerce_float : bool, default True
            Attempt to convert values of non-string, non-numeric objects (like
            decimal.Decimal) to floating point, useful for SQL result sets.
        params : list, tuple or dict, optional, default: None
            List of parameters to pass to execute method.  The syntax used
            to pass parameters is database driver dependent. Check your
            database driver documentation for which of the five syntax styles,
            described in PEP 249's paramstyle, is supported.
            Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
        parse_dates : list or dict, default: None
            - List of column names to parse as dates.
            - Dict of ``{column_name: format string}`` where format string is
              strftime compatible in case of parsing string times, or is one of
              (D, s, ns, ms, us) in case of parsing integer timestamps.
            - Dict of ``{column_name: arg dict}``, where the arg dict
              corresponds to the keyword arguments of
              :func:`pandas.to_datetime` Especially useful with databases
              without native Datetime support, such as SQLite.
        chunksize : int, default None
            If specified, return an iterator where `chunksize` is the number
            of rows to include in each chunk.
        dtype : Type name or dict of columns
            Data type for data or columns. E.g. np.float64 or
            {'a': np.float64, 'b': np.int32, 'c': 'Int64'}

            .. versionadded:: 1.3.0

        Returns
        -------
        DataFrame

        See Also
        --------
        read_sql_table : Read SQL database table into a DataFrame.
        read_sql
        """
        args = _convert_params(sql, params)

        result = self.execute(*args)
        columns = result.keys()

        if chunksize is not None:
            return self._query_iterator(
                result,
                chunksize,
                columns,
                index_col=index_col,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
                dtype=dtype,
            )
        else:
            data = result.fetchall()
            frame = _wrap_result(
                data,
                columns,
                index_col=index_col,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
                dtype=dtype,
            )
            return frame

    # ``read_sql`` is an alias of ``read_query`` for this backend.
    read_sql = read_query

    def prep_table(
        self,
        frame,
        name,
        if_exists="fail",
        index=True,
        index_label=None,
        schema=None,
        dtype: DtypeArg | None = None,
    ) -> SQLTable:
        """
        Prepares table in the database for data insertion. Creates it if needed, etc.
        """
        if dtype:
            if not is_dict_like(dtype):
                # A scalar dtype applies to every column of the frame.
                # error: Value expression in dictionary comprehension has incompatible
                # type "Union[ExtensionDtype, str, dtype[Any], Type[object],
                # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
                # Type[str], Type[float], Type[int], Type[complex], Type[bool],
                # Type[object]]]]"; expected type "Union[ExtensionDtype, str,
                # dtype[Any], Type[object]]"
                dtype = {col_name: dtype for col_name in frame}  # type: ignore[misc]
            else:
                dtype = cast(dict, dtype)

            from sqlalchemy.types import (
                TypeEngine,
                to_instance,
            )

            # Validate every user-supplied dtype before touching the DB.
            for col, my_type in dtype.items():
                if not isinstance(to_instance(my_type), TypeEngine):
                    raise ValueError(f"The type of {col} is not a SQLAlchemy type")

        table = SQLTable(
            name,
            self,
            frame=frame,
            index=index,
            if_exists=if_exists,
            index_label=index_label,
            schema=schema,
            dtype=dtype,
        )
        table.create()
        return table

    def check_case_sensitive(
        self,
        name,
        schema,
    ):
        """
        Checks table name for issues with case-sensitivity.
        Method is called after data is inserted.
        """
        if not name.isdigit() and not name.islower():
            # check for potentially case sensitivity issues (GH7815)
            # Only check when name is not a number and name is not lower case
            engine = self.connectable.engine
            with self.connectable.connect() as conn:
                if _gt14():
                    from sqlalchemy import inspect

                    insp = inspect(conn)
                    table_names = insp.get_table_names(
                        schema=schema or self.meta.schema
                    )
                else:
                    # Engine.table_names was removed in SQLAlchemy 1.4.
                    table_names = engine.table_names(
                        schema=schema or self.meta.schema, connection=conn
                    )
            if name not in table_names:
                msg = (
                    f"The provided table name '{name}' is not found exactly as "
                    "such in the database after writing the table, possibly "
                    "due to case sensitivity issues. Consider using lower "
                    "case table names."
                )
                warnings.warn(msg, UserWarning)

    def to_sql(
        self,
        frame,
        name,
        if_exists="fail",
        index=True,
        index_label=None,
        schema=None,
        chunksize=None,
        dtype: DtypeArg | None = None,
        method=None,
        engine="auto",
        **engine_kwargs,
    ):
        """
        Write records stored in a DataFrame to a SQL database.

        Parameters
        ----------
        frame : DataFrame
        name : string
            Name of SQL table.
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, do nothing.
            - replace: If table exists, drop it, recreate it, and insert data.
            - append: If table exists, insert data. Create if does not exist.
        index : boolean, default True
            Write DataFrame index as a column.
        index_label : string or sequence, default None
            Column label for index column(s). If None is given (default) and
            `index` is True, then the index names are used.
            A sequence should be given if the DataFrame uses MultiIndex.
        schema : string, default None
            Name of SQL schema in database to write to (if database flavor
            supports this). If specified, this overwrites the default
            schema of the SQLDatabase object.
        chunksize : int, default None
            If not None, then rows will be written in batches of this size at a
            time. If None, all rows will be written at once.
        dtype : single type or dict of column name to SQL type, default None
            Optional specifying the datatype for columns. The SQL type should
            be a SQLAlchemy type. If all columns are of the same type, one
            single value can be used.
        method : {None, 'multi', callable}, default None
            Controls the SQL insertion clause used:

            * None : Uses standard SQL ``INSERT`` clause (one per row).
            * 'multi': Pass multiple values in a single ``INSERT`` clause.
            * callable with signature ``(pd_table, conn, keys, data_iter)``.

            Details and a sample callable implementation can be found in the
            section :ref:`insert method <io.sql.method>`.
        engine : {'auto', 'sqlalchemy'}, default 'auto'
            SQL engine library to use. If 'auto', then the option
            ``io.sql.engine`` is used. The default ``io.sql.engine``
            behavior is 'sqlalchemy'

            .. versionadded:: 1.3.0

        **engine_kwargs
            Any additional kwargs are passed to the engine.
        """
        sql_engine = get_engine(engine)

        table = self.prep_table(
            frame=frame,
            name=name,
            if_exists=if_exists,
            index=index,
            index_label=index_label,
            schema=schema,
            dtype=dtype,
        )

        sql_engine.insert_records(
            table=table,
            con=self.connectable,
            frame=frame,
            name=name,
            index=index,
            schema=schema,
            chunksize=chunksize,
            method=method,
            **engine_kwargs,
        )

        # Warn (after the write) if the stored name may differ in case.
        self.check_case_sensitive(name=name, schema=schema)

    @property
    def tables(self):
        # Tables known to the reflected metadata.
        return self.meta.tables

    def has_table(self, name: str, schema: str | None = None):
        if _gt14():
            from sqlalchemy import inspect

            insp = inspect(self.connectable)
            return insp.has_table(name, schema or self.meta.schema)
        else:
            # run_callable was removed in SQLAlchemy 1.4.
            return self.connectable.run_callable(
                self.connectable.dialect.has_table, name, schema or self.meta.schema
            )

    def get_table(self, table_name: str, schema: str | None = None):
        schema = schema or self.meta.schema
        if schema:
            tbl = self.meta.tables.get(".".join([schema, table_name]))
        else:
            tbl = self.meta.tables.get(table_name)

        # Avoid casting double-precision floats into decimals
        from sqlalchemy import Numeric

        for column in tbl.columns:
            if isinstance(column.type, Numeric):
                column.type.asdecimal = False

        return tbl

    def drop_table(self, table_name: str, schema: str | None = None):
        schema = schema or self.meta.schema
        if self.has_table(table_name, schema):
            # Re-reflect first so the metadata entry exists, then drop and
            # clear the cached metadata.
            self.meta.reflect(bind=self.connectable, only=[table_name], schema=schema)
            self.get_table(table_name, schema).drop(bind=self.connectable)
            self.meta.clear()

    def _create_sql_schema(
        self,
        frame: DataFrame,
        table_name: str,
        keys: list[str] | None = None,
        dtype: DtypeArg | None = None,
        schema: str | None = None,
    ):
        # Build the CREATE TABLE statement without executing it.
        table = SQLTable(
            table_name,
            self,
            frame=frame,
            index=False,
            keys=keys,
            dtype=dtype,
            schema=schema,
        )
        return str(table.sql_schema())
# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes
# Maps lib.infer_dtype results to SQLite column type names.
_SQL_TYPES = {
    "string": "TEXT",
    "floating": "REAL",
    "integer": "INTEGER",
    "datetime": "TIMESTAMP",
    "date": "DATE",
    "time": "TIME",
    "boolean": "INTEGER",  # SQLite has no native BOOLEAN; stored as 0/1
}
  1546. def _get_unicode_name(name):
  1547. try:
  1548. uname = str(name).encode("utf-8", "strict").decode("utf-8")
  1549. except UnicodeError as err:
  1550. raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err
  1551. return uname
  1552. def _get_valid_sqlite_name(name):
  1553. # See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
  1554. # -for-sqlite-table-column-names-in-python
  1555. # Ensure the string can be encoded as UTF-8.
  1556. # Ensure the string does not include any NUL characters.
  1557. # Replace all " with "".
  1558. # Wrap the entire thing in double quotes.
  1559. uname = _get_unicode_name(name)
  1560. if not len(uname):
  1561. raise ValueError("Empty table or column name specified")
  1562. nul_index = uname.find("\x00")
  1563. if nul_index >= 0:
  1564. raise ValueError("SQLite identifier cannot contain NULs")
  1565. return '"' + uname.replace('"', '""') + '"'
# Warning text emitted when column names contain whitespace
# (see SQLiteTable._create_table_setup).
_SAFE_NAMES_WARNING = (
    "The spaces in these column names will not be changed. "
    "In pandas versions < 0.14, spaces were converted to underscores."
)
class SQLiteTable(SQLTable):
    """
    Patch the SQLTable for fallback support.
    Instead of a table variable just use the Create Table statement.
    """

    def __init__(self, *args, **kwargs):
        # GH 8341
        # register an adapter callable for datetime.time object
        import sqlite3

        # this will transform time(12,34,56,789) into '12:34:56.000789'
        # (this is what sqlalchemy does)
        sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
        super().__init__(*args, **kwargs)

    def sql_schema(self):
        # Here ``self.table`` is the list of CREATE statements built by
        # _create_table_setup, not a SQLAlchemy Table object.
        return str(";\n".join(self.table))

    def _execute_create(self):
        with self.pd_sql.run_transaction() as conn:
            for stmt in self.table:
                conn.execute(stmt)

    def insert_statement(self, *, num_rows: int):
        # Build "INSERT INTO <tbl> (<cols>) VALUES (?,..),(?,..)" with
        # ``num_rows`` parameter groups for executemany/execute.
        names = list(map(str, self.frame.columns))
        wld = "?"  # wildcard char
        escape = _get_valid_sqlite_name

        if self.index is not None:
            # Index columns come first, preserving their level order.
            for idx in self.index[::-1]:
                names.insert(0, idx)

        bracketed_names = [escape(column) for column in names]
        col_names = ",".join(bracketed_names)

        row_wildcards = ",".join([wld] * len(names))
        wildcards = ",".join([f"({row_wildcards})" for _ in range(num_rows)])
        insert_statement = (
            f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}"
        )
        return insert_statement

    def _execute_insert(self, conn, keys, data_iter):
        # One parameter group per row, executed via executemany.
        data_list = list(data_iter)
        conn.executemany(self.insert_statement(num_rows=1), data_list)

    def _execute_insert_multi(self, conn, keys, data_iter):
        # Single INSERT with all rows' values flattened into one list.
        data_list = list(data_iter)
        flattened_data = [x for row in data_list for x in row]
        conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data)

    def _create_table_setup(self):
        """
        Return a list of SQL statements that creates a table reflecting the
        structure of a DataFrame. The first entry will be a CREATE TABLE
        statement while the rest will be CREATE INDEX statements.
        """
        column_names_and_types = self._get_column_names_and_types(self._sql_type_name)

        pat = re.compile(r"\s+")
        column_names = [col_name for col_name, _, _ in column_names_and_types]
        if any(map(pat.search, column_names)):
            warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)

        escape = _get_valid_sqlite_name

        create_tbl_stmts = [
            escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types
        ]

        if self.keys is not None and len(self.keys):
            if not is_list_like(self.keys):
                keys = [self.keys]
            else:
                keys = self.keys
            cnames_br = ", ".join([escape(c) for c in keys])
            create_tbl_stmts.append(
                f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})"
            )
        if self.schema:
            schema_name = self.schema + "."
        else:
            schema_name = ""
        create_stmts = [
            "CREATE TABLE "
            + schema_name
            + escape(self.name)
            + " (\n"
            + ",\n ".join(create_tbl_stmts)
            + "\n)"
        ]

        # One covering index over all is_index columns, if any.
        ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index]
        if len(ix_cols):
            cnames = "_".join(ix_cols)
            cnames_br = ",".join([escape(c) for c in ix_cols])
            create_stmts.append(
                "CREATE INDEX "
                + escape("ix_" + self.name + "_" + cnames)
                + "ON "
                + escape(self.name)
                + " ("
                + cnames_br
                + ")"
            )

        return create_stmts

    def _sql_type_name(self, col):
        # Map a column to a SQLite type name, honoring user overrides.
        dtype: DtypeArg = self.dtype or {}
        if is_dict_like(dtype):
            dtype = cast(dict, dtype)
            if col.name in dtype:
                return dtype[col.name]

        # Infer type of column, while ignoring missing values.
        # Needed for inserting typed data containing NULLs, GH 8778.
        col_type = lib.infer_dtype(col, skipna=True)

        if col_type == "timedelta64":
            warnings.warn(
                "the 'timedelta' type is not supported, and will be "
                "written as integer values (ns frequency) to the database.",
                UserWarning,
                stacklevel=8,
            )
            col_type = "integer"

        elif col_type == "datetime64":
            col_type = "datetime"

        elif col_type == "empty":
            col_type = "string"

        elif col_type == "complex":
            raise ValueError("Complex datatypes not supported")

        # Anything unrecognized falls back to TEXT.
        if col_type not in _SQL_TYPES:
            col_type = "string"

        return _SQL_TYPES[col_type]
  1687. class SQLiteDatabase(PandasSQL):
  1688. """
  1689. Version of SQLDatabase to support SQLite connections (fallback without
  1690. SQLAlchemy). This should only be used internally.
  1691. Parameters
  1692. ----------
  1693. con : sqlite connection object
  1694. """
    def __init__(self, con):
        # DBAPI2 sqlite connection; all statements run through its cursors.
        self.con = con
    @contextmanager
    def run_transaction(self):
        # Yield a cursor; commit only if the caller's block succeeds,
        # roll back (and re-raise) otherwise. The cursor is always closed.
        cur = self.con.cursor()
        try:
            yield cur
            self.con.commit()
        except Exception:
            self.con.rollback()
            raise
        finally:
            cur.close()
  1708. def execute(self, *args, **kwargs):
  1709. cur = self.con.cursor()
  1710. try:
  1711. cur.execute(*args, **kwargs)
  1712. return cur
  1713. except Exception as exc:
  1714. try:
  1715. self.con.rollback()
  1716. except Exception as inner_exc: # pragma: no cover
  1717. ex = DatabaseError(
  1718. f"Execution failed on sql: {args[0]}\n{exc}\nunable to rollback"
  1719. )
  1720. raise ex from inner_exc
  1721. ex = DatabaseError(f"Execution failed on sql '{args[0]}': {exc}")
  1722. raise ex from exc
  1723. @staticmethod
  1724. def _query_iterator(
  1725. cursor,
  1726. chunksize: int,
  1727. columns,
  1728. index_col=None,
  1729. coerce_float: bool = True,
  1730. parse_dates=None,
  1731. dtype: DtypeArg | None = None,
  1732. ):
  1733. """Return generator through chunked result set"""
  1734. has_read_data = False
  1735. while True:
  1736. data = cursor.fetchmany(chunksize)
  1737. if type(data) == tuple:
  1738. data = list(data)
  1739. if not data:
  1740. cursor.close()
  1741. if not has_read_data:
  1742. yield DataFrame.from_records(
  1743. [], columns=columns, coerce_float=coerce_float
  1744. )
  1745. break
  1746. else:
  1747. has_read_data = True
  1748. yield _wrap_result(
  1749. data,
  1750. columns,
  1751. index_col=index_col,
  1752. coerce_float=coerce_float,
  1753. parse_dates=parse_dates,
  1754. dtype=dtype,
  1755. )
  1756. def read_query(
  1757. self,
  1758. sql,
  1759. index_col=None,
  1760. coerce_float: bool = True,
  1761. params=None,
  1762. parse_dates=None,
  1763. chunksize: int | None = None,
  1764. dtype: DtypeArg | None = None,
  1765. ):
  1766. args = _convert_params(sql, params)
  1767. cursor = self.execute(*args)
  1768. columns = [col_desc[0] for col_desc in cursor.description]
  1769. if chunksize is not None:
  1770. return self._query_iterator(
  1771. cursor,
  1772. chunksize,
  1773. columns,
  1774. index_col=index_col,
  1775. coerce_float=coerce_float,
  1776. parse_dates=parse_dates,
  1777. dtype=dtype,
  1778. )
  1779. else:
  1780. data = self._fetchall_as_list(cursor)
  1781. cursor.close()
  1782. frame = _wrap_result(
  1783. data,
  1784. columns,
  1785. index_col=index_col,
  1786. coerce_float=coerce_float,
  1787. parse_dates=parse_dates,
  1788. dtype=dtype,
  1789. )
  1790. return frame
  1791. def _fetchall_as_list(self, cur):
  1792. result = cur.fetchall()
  1793. if not isinstance(result, list):
  1794. result = list(result)
  1795. return result
  1796. def to_sql(
  1797. self,
  1798. frame,
  1799. name,
  1800. if_exists="fail",
  1801. index=True,
  1802. index_label=None,
  1803. schema=None,
  1804. chunksize=None,
  1805. dtype: DtypeArg | None = None,
  1806. method=None,
  1807. **kwargs,
  1808. ):
  1809. """
  1810. Write records stored in a DataFrame to a SQL database.
  1811. Parameters
  1812. ----------
  1813. frame: DataFrame
  1814. name: string
  1815. Name of SQL table.
  1816. if_exists: {'fail', 'replace', 'append'}, default 'fail'
  1817. fail: If table exists, do nothing.
  1818. replace: If table exists, drop it, recreate it, and insert data.
  1819. append: If table exists, insert data. Create if it does not exist.
  1820. index : bool, default True
  1821. Write DataFrame index as a column
  1822. index_label : string or sequence, default None
  1823. Column label for index column(s). If None is given (default) and
  1824. `index` is True, then the index names are used.
  1825. A sequence should be given if the DataFrame uses MultiIndex.
  1826. schema : string, default None
  1827. Ignored parameter included for compatibility with SQLAlchemy
  1828. version of ``to_sql``.
  1829. chunksize : int, default None
  1830. If not None, then rows will be written in batches of this
  1831. size at a time. If None, all rows will be written at once.
  1832. dtype : single type or dict of column name to SQL type, default None
  1833. Optional specifying the datatype for columns. The SQL type should
  1834. be a string. If all columns are of the same type, one single value
  1835. can be used.
  1836. method : {None, 'multi', callable}, default None
  1837. Controls the SQL insertion clause used:
  1838. * None : Uses standard SQL ``INSERT`` clause (one per row).
  1839. * 'multi': Pass multiple values in a single ``INSERT`` clause.
  1840. * callable with signature ``(pd_table, conn, keys, data_iter)``.
  1841. Details and a sample callable implementation can be found in the
  1842. section :ref:`insert method <io.sql.method>`.
  1843. """
  1844. if dtype:
  1845. if not is_dict_like(dtype):
  1846. # error: Value expression in dictionary comprehension has incompatible
  1847. # type "Union[ExtensionDtype, str, dtype[Any], Type[object],
  1848. # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
  1849. # Type[str], Type[float], Type[int], Type[complex], Type[bool],
  1850. # Type[object]]]]"; expected type "Union[ExtensionDtype, str,
  1851. # dtype[Any], Type[object]]"
  1852. dtype = {col_name: dtype for col_name in frame} # type: ignore[misc]
  1853. else:
  1854. dtype = cast(dict, dtype)
  1855. for col, my_type in dtype.items():
  1856. if not isinstance(my_type, str):
  1857. raise ValueError(f"{col} ({my_type}) not a string")
  1858. table = SQLiteTable(
  1859. name,
  1860. self,
  1861. frame=frame,
  1862. index=index,
  1863. if_exists=if_exists,
  1864. index_label=index_label,
  1865. dtype=dtype,
  1866. )
  1867. table.create()
  1868. table.insert(chunksize, method)
  1869. def has_table(self, name: str, schema: str | None = None):
  1870. # TODO(wesm): unused?
  1871. # escape = _get_valid_sqlite_name
  1872. # esc_name = escape(name)
  1873. wld = "?"
  1874. query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"
  1875. return len(self.execute(query, [name]).fetchall()) > 0
  1876. def get_table(self, table_name: str, schema: str | None = None):
  1877. return None # not supported in fallback mode
  1878. def drop_table(self, name: str, schema: str | None = None):
  1879. drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
  1880. self.execute(drop_sql)
  1881. def _create_sql_schema(
  1882. self,
  1883. frame,
  1884. table_name: str,
  1885. keys=None,
  1886. dtype: DtypeArg | None = None,
  1887. schema: str | None = None,
  1888. ):
  1889. table = SQLiteTable(
  1890. table_name,
  1891. self,
  1892. frame=frame,
  1893. index=False,
  1894. keys=keys,
  1895. dtype=dtype,
  1896. schema=schema,
  1897. )
  1898. return str(table.sql_schema())
  1899. def get_schema(
  1900. frame,
  1901. name: str,
  1902. keys=None,
  1903. con=None,
  1904. dtype: DtypeArg | None = None,
  1905. schema: str | None = None,
  1906. ):
  1907. """
  1908. Get the SQL db table schema for the given frame.
  1909. Parameters
  1910. ----------
  1911. frame : DataFrame
  1912. name : str
  1913. name of SQL table
  1914. keys : string or sequence, default: None
  1915. columns to use a primary key
  1916. con: an open SQL database connection object or a SQLAlchemy connectable
  1917. Using SQLAlchemy makes it possible to use any DB supported by that
  1918. library, default: None
  1919. If a DBAPI2 object, only sqlite3 is supported.
  1920. dtype : dict of column name to SQL type, default None
  1921. Optional specifying the datatype for columns. The SQL type should
  1922. be a SQLAlchemy type, or a string for sqlite3 fallback connection.
  1923. schema: str, default: None
  1924. Optional specifying the schema to be used in creating the table.
  1925. .. versionadded:: 1.2.0
  1926. """
  1927. pandas_sql = pandasSQL_builder(con=con)
  1928. return pandas_sql._create_sql_schema(
  1929. frame, name, keys=keys, dtype=dtype, schema=schema
  1930. )