sql.py 59 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782
  1. """
  2. Collection of query wrappers / abstractions to both facilitate data
  3. retrieval and to reduce dependency on DB-specific API.
  4. """
  5. from contextlib import contextmanager
  6. from datetime import date, datetime, time
  7. from functools import partial
  8. import re
  9. import warnings
  10. import numpy as np
  11. import pandas._libs.lib as lib
  12. from pandas.core.dtypes.common import is_datetime64tz_dtype, is_dict_like, is_list_like
  13. from pandas.core.dtypes.dtypes import DatetimeTZDtype
  14. from pandas.core.dtypes.missing import isna
  15. from pandas.core.api import DataFrame, Series
  16. from pandas.core.base import PandasObject
  17. from pandas.core.tools.datetimes import to_datetime
class SQLAlchemyRequired(ImportError):
    """Raised when an operation needs SQLAlchemy but it is not installed."""

    pass
class DatabaseError(IOError):
    """Raised when the execution of a SQL statement fails."""

    pass
  22. # -----------------------------------------------------------------------------
  23. # -- Helper functions
  24. _SQLALCHEMY_INSTALLED = None
  25. def _is_sqlalchemy_connectable(con):
  26. global _SQLALCHEMY_INSTALLED
  27. if _SQLALCHEMY_INSTALLED is None:
  28. try:
  29. import sqlalchemy
  30. _SQLALCHEMY_INSTALLED = True
  31. except ImportError:
  32. _SQLALCHEMY_INSTALLED = False
  33. if _SQLALCHEMY_INSTALLED:
  34. import sqlalchemy # noqa: F811
  35. return isinstance(con, sqlalchemy.engine.Connectable)
  36. else:
  37. return False
  38. def _convert_params(sql, params):
  39. """Convert SQL and params args to DBAPI2.0 compliant format."""
  40. args = [sql]
  41. if params is not None:
  42. if hasattr(params, "keys"): # test if params is a mapping
  43. args += [params]
  44. else:
  45. args += [list(params)]
  46. return args
  47. def _process_parse_dates_argument(parse_dates):
  48. """Process parse_dates argument for read_sql functions"""
  49. # handle non-list entries for parse_dates gracefully
  50. if parse_dates is True or parse_dates is None or parse_dates is False:
  51. parse_dates = []
  52. elif not hasattr(parse_dates, "__iter__"):
  53. parse_dates = [parse_dates]
  54. return parse_dates
  55. def _handle_date_column(col, utc=None, format=None):
  56. if isinstance(format, dict):
  57. return to_datetime(col, errors="ignore", **format)
  58. else:
  59. # Allow passing of formatting string for integers
  60. # GH17855
  61. if format is None and (
  62. issubclass(col.dtype.type, np.floating)
  63. or issubclass(col.dtype.type, np.integer)
  64. ):
  65. format = "s"
  66. if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]:
  67. return to_datetime(col, errors="coerce", unit=format, utc=utc)
  68. elif is_datetime64tz_dtype(col):
  69. # coerce to UTC timezone
  70. # GH11216
  71. return to_datetime(col, utc=True)
  72. else:
  73. return to_datetime(col, errors="coerce", format=format, utc=utc)
  74. def _parse_date_columns(data_frame, parse_dates):
  75. """
  76. Force non-datetime columns to be read as such.
  77. Supports both string formatted and integer timestamp columns.
  78. """
  79. parse_dates = _process_parse_dates_argument(parse_dates)
  80. # we want to coerce datetime64_tz dtypes for now to UTC
  81. # we could in theory do a 'nice' conversion from a FixedOffset tz
  82. # GH11216
  83. for col_name, df_col in data_frame.items():
  84. if is_datetime64tz_dtype(df_col) or col_name in parse_dates:
  85. try:
  86. fmt = parse_dates[col_name]
  87. except TypeError:
  88. fmt = None
  89. data_frame[col_name] = _handle_date_column(df_col, format=fmt)
  90. return data_frame
  91. def _wrap_result(data, columns, index_col=None, coerce_float=True, parse_dates=None):
  92. """Wrap result set of query in a DataFrame."""
  93. frame = DataFrame.from_records(data, columns=columns, coerce_float=coerce_float)
  94. frame = _parse_date_columns(frame, parse_dates)
  95. if index_col is not None:
  96. frame.set_index(index_col, inplace=True)
  97. return frame
  98. def execute(sql, con, cur=None, params=None):
  99. """
  100. Execute the given SQL query using the provided connection object.
  101. Parameters
  102. ----------
  103. sql : string
  104. SQL query to be executed.
  105. con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
  106. Using SQLAlchemy makes it possible to use any DB supported by the
  107. library.
  108. If a DBAPI2 object, only sqlite3 is supported.
  109. cur : deprecated, cursor is obtained from connection, default: None
  110. params : list or tuple, optional, default: None
  111. List of parameters to pass to execute method.
  112. Returns
  113. -------
  114. Results Iterable
  115. """
  116. if cur is None:
  117. pandas_sql = pandasSQL_builder(con)
  118. else:
  119. pandas_sql = pandasSQL_builder(cur, is_cursor=True)
  120. args = _convert_params(sql, params)
  121. return pandas_sql.execute(*args)
  122. # -----------------------------------------------------------------------------
  123. # -- Read and write to DataFrames
  124. def read_sql_table(
  125. table_name,
  126. con,
  127. schema=None,
  128. index_col=None,
  129. coerce_float=True,
  130. parse_dates=None,
  131. columns=None,
  132. chunksize=None,
  133. ):
  134. """
  135. Read SQL database table into a DataFrame.
  136. Given a table name and a SQLAlchemy connectable, returns a DataFrame.
  137. This function does not support DBAPI connections.
  138. Parameters
  139. ----------
  140. table_name : str
  141. Name of SQL table in database.
  142. con : SQLAlchemy connectable or str
  143. A database URI could be provided as as str.
  144. SQLite DBAPI connection mode not supported.
  145. schema : str, default None
  146. Name of SQL schema in database to query (if database flavor
  147. supports this). Uses default schema if None (default).
  148. index_col : str or list of str, optional, default: None
  149. Column(s) to set as index(MultiIndex).
  150. coerce_float : bool, default True
  151. Attempts to convert values of non-string, non-numeric objects (like
  152. decimal.Decimal) to floating point. Can result in loss of Precision.
  153. parse_dates : list or dict, default None
  154. - List of column names to parse as dates.
  155. - Dict of ``{column_name: format string}`` where format string is
  156. strftime compatible in case of parsing string times or is one of
  157. (D, s, ns, ms, us) in case of parsing integer timestamps.
  158. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
  159. to the keyword arguments of :func:`pandas.to_datetime`
  160. Especially useful with databases without native Datetime support,
  161. such as SQLite.
  162. columns : list, default None
  163. List of column names to select from SQL table.
  164. chunksize : int, default None
  165. If specified, returns an iterator where `chunksize` is the number of
  166. rows to include in each chunk.
  167. Returns
  168. -------
  169. DataFrame
  170. A SQL table is returned as two-dimensional data structure with labeled
  171. axes.
  172. See Also
  173. --------
  174. read_sql_query : Read SQL query into a DataFrame.
  175. read_sql : Read SQL query or database table into a DataFrame.
  176. Notes
  177. -----
  178. Any datetime values with time zone information will be converted to UTC.
  179. Examples
  180. --------
  181. >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
  182. """
  183. con = _engine_builder(con)
  184. if not _is_sqlalchemy_connectable(con):
  185. raise NotImplementedError(
  186. "read_sql_table only supported for SQLAlchemy connectable."
  187. )
  188. import sqlalchemy
  189. from sqlalchemy.schema import MetaData
  190. meta = MetaData(con, schema=schema)
  191. try:
  192. meta.reflect(only=[table_name], views=True)
  193. except sqlalchemy.exc.InvalidRequestError:
  194. raise ValueError(f"Table {table_name} not found")
  195. pandas_sql = SQLDatabase(con, meta=meta)
  196. table = pandas_sql.read_table(
  197. table_name,
  198. index_col=index_col,
  199. coerce_float=coerce_float,
  200. parse_dates=parse_dates,
  201. columns=columns,
  202. chunksize=chunksize,
  203. )
  204. if table is not None:
  205. return table
  206. else:
  207. raise ValueError(f"Table {table_name} not found", con)
  208. def read_sql_query(
  209. sql,
  210. con,
  211. index_col=None,
  212. coerce_float=True,
  213. params=None,
  214. parse_dates=None,
  215. chunksize=None,
  216. ):
  217. """
  218. Read SQL query into a DataFrame.
  219. Returns a DataFrame corresponding to the result set of the query
  220. string. Optionally provide an `index_col` parameter to use one of the
  221. columns as the index, otherwise default integer index will be used.
  222. Parameters
  223. ----------
  224. sql : str SQL query or SQLAlchemy Selectable (select or text object)
  225. SQL query to be executed.
  226. con : SQLAlchemy connectable(engine/connection), database str URI,
  227. or sqlite3 DBAPI2 connection
  228. Using SQLAlchemy makes it possible to use any DB supported by that
  229. library.
  230. If a DBAPI2 object, only sqlite3 is supported.
  231. index_col : str or list of strings, optional, default: None
  232. Column(s) to set as index(MultiIndex).
  233. coerce_float : bool, default True
  234. Attempts to convert values of non-string, non-numeric objects (like
  235. decimal.Decimal) to floating point. Useful for SQL result sets.
  236. params : list, tuple or dict, optional, default: None
  237. List of parameters to pass to execute method. The syntax used
  238. to pass parameters is database driver dependent. Check your
  239. database driver documentation for which of the five syntax styles,
  240. described in PEP 249's paramstyle, is supported.
  241. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
  242. parse_dates : list or dict, default: None
  243. - List of column names to parse as dates.
  244. - Dict of ``{column_name: format string}`` where format string is
  245. strftime compatible in case of parsing string times, or is one of
  246. (D, s, ns, ms, us) in case of parsing integer timestamps.
  247. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
  248. to the keyword arguments of :func:`pandas.to_datetime`
  249. Especially useful with databases without native Datetime support,
  250. such as SQLite.
  251. chunksize : int, default None
  252. If specified, return an iterator where `chunksize` is the number of
  253. rows to include in each chunk.
  254. Returns
  255. -------
  256. DataFrame
  257. See Also
  258. --------
  259. read_sql_table : Read SQL database table into a DataFrame.
  260. read_sql
  261. Notes
  262. -----
  263. Any datetime values with time zone information parsed via the `parse_dates`
  264. parameter will be converted to UTC.
  265. """
  266. pandas_sql = pandasSQL_builder(con)
  267. return pandas_sql.read_query(
  268. sql,
  269. index_col=index_col,
  270. params=params,
  271. coerce_float=coerce_float,
  272. parse_dates=parse_dates,
  273. chunksize=chunksize,
  274. )
  275. def read_sql(
  276. sql,
  277. con,
  278. index_col=None,
  279. coerce_float=True,
  280. params=None,
  281. parse_dates=None,
  282. columns=None,
  283. chunksize=None,
  284. ):
  285. """
  286. Read SQL query or database table into a DataFrame.
  287. This function is a convenience wrapper around ``read_sql_table`` and
  288. ``read_sql_query`` (for backward compatibility). It will delegate
  289. to the specific function depending on the provided input. A SQL query
  290. will be routed to ``read_sql_query``, while a database table name will
  291. be routed to ``read_sql_table``. Note that the delegated function might
  292. have more specific notes about their functionality not listed here.
  293. Parameters
  294. ----------
  295. sql : str or SQLAlchemy Selectable (select or text object)
  296. SQL query to be executed or a table name.
  297. con : SQLAlchemy connectable (engine/connection) or database str URI
  298. or DBAPI2 connection (fallback mode)'
  299. Using SQLAlchemy makes it possible to use any DB supported by that
  300. library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
  301. for engine disposal and connection closure for the SQLAlchemy connectable. See
  302. `here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_
  303. index_col : str or list of strings, optional, default: None
  304. Column(s) to set as index(MultiIndex).
  305. coerce_float : bool, default True
  306. Attempts to convert values of non-string, non-numeric objects (like
  307. decimal.Decimal) to floating point, useful for SQL result sets.
  308. params : list, tuple or dict, optional, default: None
  309. List of parameters to pass to execute method. The syntax used
  310. to pass parameters is database driver dependent. Check your
  311. database driver documentation for which of the five syntax styles,
  312. described in PEP 249's paramstyle, is supported.
  313. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
  314. parse_dates : list or dict, default: None
  315. - List of column names to parse as dates.
  316. - Dict of ``{column_name: format string}`` where format string is
  317. strftime compatible in case of parsing string times, or is one of
  318. (D, s, ns, ms, us) in case of parsing integer timestamps.
  319. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
  320. to the keyword arguments of :func:`pandas.to_datetime`
  321. Especially useful with databases without native Datetime support,
  322. such as SQLite.
  323. columns : list, default: None
  324. List of column names to select from SQL table (only used when reading
  325. a table).
  326. chunksize : int, default None
  327. If specified, return an iterator where `chunksize` is the
  328. number of rows to include in each chunk.
  329. Returns
  330. -------
  331. DataFrame
  332. See Also
  333. --------
  334. read_sql_table : Read SQL database table into a DataFrame.
  335. read_sql_query : Read SQL query into a DataFrame.
  336. """
  337. pandas_sql = pandasSQL_builder(con)
  338. if isinstance(pandas_sql, SQLiteDatabase):
  339. return pandas_sql.read_query(
  340. sql,
  341. index_col=index_col,
  342. params=params,
  343. coerce_float=coerce_float,
  344. parse_dates=parse_dates,
  345. chunksize=chunksize,
  346. )
  347. try:
  348. _is_table_name = pandas_sql.has_table(sql)
  349. except Exception:
  350. # using generic exception to catch errors from sql drivers (GH24988)
  351. _is_table_name = False
  352. if _is_table_name:
  353. pandas_sql.meta.reflect(only=[sql])
  354. return pandas_sql.read_table(
  355. sql,
  356. index_col=index_col,
  357. coerce_float=coerce_float,
  358. parse_dates=parse_dates,
  359. columns=columns,
  360. chunksize=chunksize,
  361. )
  362. else:
  363. return pandas_sql.read_query(
  364. sql,
  365. index_col=index_col,
  366. params=params,
  367. coerce_float=coerce_float,
  368. parse_dates=parse_dates,
  369. chunksize=chunksize,
  370. )
  371. def to_sql(
  372. frame,
  373. name,
  374. con,
  375. schema=None,
  376. if_exists="fail",
  377. index=True,
  378. index_label=None,
  379. chunksize=None,
  380. dtype=None,
  381. method=None,
  382. ):
  383. """
  384. Write records stored in a DataFrame to a SQL database.
  385. Parameters
  386. ----------
  387. frame : DataFrame, Series
  388. name : str
  389. Name of SQL table.
  390. con : SQLAlchemy connectable(engine/connection) or database string URI
  391. or sqlite3 DBAPI2 connection
  392. Using SQLAlchemy makes it possible to use any DB supported by that
  393. library.
  394. If a DBAPI2 object, only sqlite3 is supported.
  395. schema : str, optional
  396. Name of SQL schema in database to write to (if database flavor
  397. supports this). If None, use default schema (default).
  398. if_exists : {'fail', 'replace', 'append'}, default 'fail'
  399. - fail: If table exists, do nothing.
  400. - replace: If table exists, drop it, recreate it, and insert data.
  401. - append: If table exists, insert data. Create if does not exist.
  402. index : boolean, default True
  403. Write DataFrame index as a column.
  404. index_label : str or sequence, optional
  405. Column label for index column(s). If None is given (default) and
  406. `index` is True, then the index names are used.
  407. A sequence should be given if the DataFrame uses MultiIndex.
  408. chunksize : int, optional
  409. Specify the number of rows in each batch to be written at a time.
  410. By default, all rows will be written at once.
  411. dtype : dict or scalar, optional
  412. Specifying the datatype for columns. If a dictionary is used, the
  413. keys should be the column names and the values should be the
  414. SQLAlchemy types or strings for the sqlite3 fallback mode. If a
  415. scalar is provided, it will be applied to all columns.
  416. method : {None, 'multi', callable}, optional
  417. Controls the SQL insertion clause used:
  418. - None : Uses standard SQL ``INSERT`` clause (one per row).
  419. - 'multi': Pass multiple values in a single ``INSERT`` clause.
  420. - callable with signature ``(pd_table, conn, keys, data_iter)``.
  421. Details and a sample callable implementation can be found in the
  422. section :ref:`insert method <io.sql.method>`.
  423. .. versionadded:: 0.24.0
  424. """
  425. if if_exists not in ("fail", "replace", "append"):
  426. raise ValueError(f"'{if_exists}' is not valid for if_exists")
  427. pandas_sql = pandasSQL_builder(con, schema=schema)
  428. if isinstance(frame, Series):
  429. frame = frame.to_frame()
  430. elif not isinstance(frame, DataFrame):
  431. raise NotImplementedError(
  432. "'frame' argument should be either a Series or a DataFrame"
  433. )
  434. pandas_sql.to_sql(
  435. frame,
  436. name,
  437. if_exists=if_exists,
  438. index=index,
  439. index_label=index_label,
  440. schema=schema,
  441. chunksize=chunksize,
  442. dtype=dtype,
  443. method=method,
  444. )
  445. def has_table(table_name, con, schema=None):
  446. """
  447. Check if DataBase has named table.
  448. Parameters
  449. ----------
  450. table_name: string
  451. Name of SQL table.
  452. con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
  453. Using SQLAlchemy makes it possible to use any DB supported by that
  454. library.
  455. If a DBAPI2 object, only sqlite3 is supported.
  456. schema : string, default None
  457. Name of SQL schema in database to write to (if database flavor supports
  458. this). If None, use default schema (default).
  459. Returns
  460. -------
  461. boolean
  462. """
  463. pandas_sql = pandasSQL_builder(con, schema=schema)
  464. return pandas_sql.has_table(table_name)
  465. table_exists = has_table
  466. def _engine_builder(con):
  467. """
  468. Returns a SQLAlchemy engine from a URI (if con is a string)
  469. else it just return con without modifying it.
  470. """
  471. global _SQLALCHEMY_INSTALLED
  472. if isinstance(con, str):
  473. try:
  474. import sqlalchemy
  475. except ImportError:
  476. _SQLALCHEMY_INSTALLED = False
  477. else:
  478. con = sqlalchemy.create_engine(con)
  479. return con
  480. return con
  481. def pandasSQL_builder(con, schema=None, meta=None, is_cursor=False):
  482. """
  483. Convenience function to return the correct PandasSQL subclass based on the
  484. provided parameters.
  485. """
  486. # When support for DBAPI connections is removed,
  487. # is_cursor should not be necessary.
  488. con = _engine_builder(con)
  489. if _is_sqlalchemy_connectable(con):
  490. return SQLDatabase(con, schema=schema, meta=meta)
  491. elif isinstance(con, str):
  492. raise ImportError("Using URI string without sqlalchemy installed.")
  493. else:
  494. return SQLiteDatabase(con, is_cursor=is_cursor)
  495. class SQLTable(PandasObject):
  496. """
  497. For mapping Pandas tables to SQL tables.
  498. Uses fact that table is reflected by SQLAlchemy to
  499. do better type conversions.
  500. Also holds various flags needed to avoid having to
  501. pass them between functions all the time.
  502. """
  503. # TODO: support for multiIndex
    def __init__(
        self,
        name,
        pandas_sql_engine,
        frame=None,
        index=True,
        if_exists="fail",
        prefix="pandas",
        index_label=None,
        schema=None,
        keys=None,
        dtype=None,
    ):
        self.name = name
        # owning PandasSQL engine wrapper; used for all database interaction
        self.pd_sql = pandas_sql_engine
        self.prefix = prefix
        self.frame = frame
        # resolved list of index column label(s), or None when the index is
        # not written/read (see _index_name)
        self.index = self._index_name(index, index_label)
        self.schema = schema
        self.if_exists = if_exists
        self.keys = keys
        self.dtype = dtype

        if frame is not None:
            # We want to initialize based on a dataframe
            self.table = self._create_table_setup()
        else:
            # no data provided, read-only mode
            self.table = self.pd_sql.get_table(self.name, self.schema)

        if self.table is None:
            raise ValueError(f"Could not init table '{name}'")
    def exists(self):
        """Return True if this table already exists in the database."""
        return self.pd_sql.has_table(self.name, self.schema)
    def sql_schema(self):
        """Return the CREATE TABLE statement for this table as a string,
        compiled against the current connectable's dialect."""
        from sqlalchemy.schema import CreateTable

        return str(CreateTable(self.table).compile(self.pd_sql.connectable))
    def _execute_create(self):
        """Create the table in the database from this object's schema."""
        # Inserting table into database, add to MetaData object
        self.table = self.table.tometadata(self.pd_sql.meta)
        self.table.create()
  543. def create(self):
  544. if self.exists():
  545. if self.if_exists == "fail":
  546. raise ValueError(f"Table '{self.name}' already exists.")
  547. elif self.if_exists == "replace":
  548. self.pd_sql.drop_table(self.name, self.schema)
  549. self._execute_create()
  550. elif self.if_exists == "append":
  551. pass
  552. else:
  553. raise ValueError(f"'{self.if_exists}' is not valid for if_exists")
  554. else:
  555. self._execute_create()
  556. def _execute_insert(self, conn, keys, data_iter):
  557. """Execute SQL statement inserting data
  558. Parameters
  559. ----------
  560. conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
  561. keys : list of str
  562. Column names
  563. data_iter : generator of list
  564. Each item contains a list of values to be inserted
  565. """
  566. data = [dict(zip(keys, row)) for row in data_iter]
  567. conn.execute(self.table.insert(), data)
  568. def _execute_insert_multi(self, conn, keys, data_iter):
  569. """Alternative to _execute_insert for DBs support multivalue INSERT.
  570. Note: multi-value insert is usually faster for analytics DBs
  571. and tables containing a few columns
  572. but performance degrades quickly with increase of columns.
  573. """
  574. data = [dict(zip(keys, row)) for row in data_iter]
  575. conn.execute(self.table.insert(data))
  576. def insert_data(self):
  577. if self.index is not None:
  578. temp = self.frame.copy()
  579. temp.index.names = self.index
  580. try:
  581. temp.reset_index(inplace=True)
  582. except ValueError as err:
  583. raise ValueError(f"duplicate name in index/columns: {err}")
  584. else:
  585. temp = self.frame
  586. column_names = list(map(str, temp.columns))
  587. ncols = len(column_names)
  588. data_list = [None] * ncols
  589. blocks = temp._data.blocks
  590. for b in blocks:
  591. if b.is_datetime:
  592. # return datetime.datetime objects
  593. if b.is_datetimetz:
  594. # GH 9086: Ensure we return datetimes with timezone info
  595. # Need to return 2-D data; DatetimeIndex is 1D
  596. d = b.values.to_pydatetime()
  597. d = np.atleast_2d(d)
  598. else:
  599. # convert to microsecond resolution for datetime.datetime
  600. d = b.values.astype("M8[us]").astype(object)
  601. else:
  602. d = np.array(b.get_values(), dtype=object)
  603. # replace NaN with None
  604. if b._can_hold_na:
  605. mask = isna(d)
  606. d[mask] = None
  607. for col_loc, col in zip(b.mgr_locs, d):
  608. data_list[col_loc] = col
  609. return column_names, data_list
  610. def insert(self, chunksize=None, method=None):
  611. # set insert method
  612. if method is None:
  613. exec_insert = self._execute_insert
  614. elif method == "multi":
  615. exec_insert = self._execute_insert_multi
  616. elif callable(method):
  617. exec_insert = partial(method, self)
  618. else:
  619. raise ValueError(f"Invalid parameter `method`: {method}")
  620. keys, data_list = self.insert_data()
  621. nrows = len(self.frame)
  622. if nrows == 0:
  623. return
  624. if chunksize is None:
  625. chunksize = nrows
  626. elif chunksize == 0:
  627. raise ValueError("chunksize argument should be non-zero")
  628. chunks = int(nrows / chunksize) + 1
  629. with self.pd_sql.run_transaction() as conn:
  630. for i in range(chunks):
  631. start_i = i * chunksize
  632. end_i = min((i + 1) * chunksize, nrows)
  633. if start_i >= end_i:
  634. break
  635. chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
  636. exec_insert(conn, keys, chunk_iter)
    def _query_iterator(
        self, result, chunksize, columns, coerce_float=True, parse_dates=None
    ):
        """Return generator through chunked result set."""
        # Fetch `chunksize` rows at a time until the cursor is exhausted,
        # yielding each chunk as a DataFrame. Note each chunk is also
        # stored on self.frame, overwriting the previous one.
        while True:
            data = result.fetchmany(chunksize)
            if not data:
                break
            else:
                self.frame = DataFrame.from_records(
                    data, columns=columns, coerce_float=coerce_float
                )

                # column post-processing (e.g. date parsing via parse_dates);
                # _harmonize_columns is defined elsewhere in this class
                self._harmonize_columns(parse_dates=parse_dates)

                if self.index is not None:
                    self.frame.set_index(self.index, inplace=True)

                yield self.frame
def read(self, coerce_float=True, parse_dates=None, columns=None, chunksize=None):
    """
    Read this SQL table into a DataFrame.

    Parameters
    ----------
    coerce_float : bool, default True
        Attempt to convert values of non-string, non-numeric objects to
        floating point.
    parse_dates : list or dict, default None
        Columns to parse as dates (forwarded to ``_harmonize_columns``).
    columns : list, default None
        Subset of columns to select; index columns are always included.
    chunksize : int, default None
        If given, return a generator yielding DataFrames of this many
        rows instead of a single DataFrame.
    """
    if columns is not None and len(columns) > 0:
        from sqlalchemy import select

        cols = [self.table.c[n] for n in columns]
        if self.index is not None:
            # Prepend the index column(s), preserving their level order,
            # so they survive the explicit column selection.
            for idx in self.index[::-1]:
                cols.insert(0, self.table.c[idx])
        sql_select = select(cols)
    else:
        # No column subset requested: select the whole table.
        sql_select = self.table.select()

    result = self.pd_sql.execute(sql_select)
    column_names = result.keys()

    if chunksize is not None:
        return self._query_iterator(
            result,
            chunksize,
            column_names,
            coerce_float=coerce_float,
            parse_dates=parse_dates,
        )
    else:
        data = result.fetchall()
        self.frame = DataFrame.from_records(
            data, columns=column_names, coerce_float=coerce_float
        )

        self._harmonize_columns(parse_dates=parse_dates)

        if self.index is not None:
            self.frame.set_index(self.index, inplace=True)

        return self.frame
  682. def _index_name(self, index, index_label):
  683. # for writing: index=True to include index in sql table
  684. if index is True:
  685. nlevels = self.frame.index.nlevels
  686. # if index_label is specified, set this as index name(s)
  687. if index_label is not None:
  688. if not isinstance(index_label, list):
  689. index_label = [index_label]
  690. if len(index_label) != nlevels:
  691. raise ValueError(
  692. "Length of 'index_label' should match number of "
  693. f"levels, which is {nlevels}"
  694. )
  695. else:
  696. return index_label
  697. # return the used column labels for the index columns
  698. if (
  699. nlevels == 1
  700. and "index" not in self.frame.columns
  701. and self.frame.index.name is None
  702. ):
  703. return ["index"]
  704. else:
  705. return [
  706. l if l is not None else f"level_{i}"
  707. for i, l in enumerate(self.frame.index.names)
  708. ]
  709. # for reading: index=(list of) string to specify column to set as index
  710. elif isinstance(index, str):
  711. return [index]
  712. elif isinstance(index, list):
  713. return index
  714. else:
  715. return None
  716. def _get_column_names_and_types(self, dtype_mapper):
  717. column_names_and_types = []
  718. if self.index is not None:
  719. for i, idx_label in enumerate(self.index):
  720. idx_type = dtype_mapper(self.frame.index._get_level_values(i))
  721. column_names_and_types.append((str(idx_label), idx_type, True))
  722. column_names_and_types += [
  723. (str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False)
  724. for i in range(len(self.frame.columns))
  725. ]
  726. return column_names_and_types
def _create_table_setup(self):
    """
    Build (but do not create) the SQLAlchemy ``Table`` reflecting the
    frame's structure.

    Columns flagged as index columns get ``index=True``; ``self.keys``,
    if set, becomes a primary-key constraint named ``"<name>_pk"``.
    """
    from sqlalchemy import Table, Column, PrimaryKeyConstraint

    column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type)

    columns = [
        Column(name, typ, index=is_index)
        for name, typ, is_index in column_names_and_types
    ]

    if self.keys is not None:
        # Normalize a scalar key to a one-element list.
        if not is_list_like(self.keys):
            keys = [self.keys]
        else:
            keys = self.keys
        pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk")
        columns.append(pkc)

    schema = self.schema or self.pd_sql.meta.schema

    # At this point, attach to new metadata, only attach to self.meta
    # once table is created.
    from sqlalchemy.schema import MetaData

    meta = MetaData(self.pd_sql, schema=schema)

    return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None):
    """
    Make the DataFrame's column types align with the SQL table
    column types.

    Need to work around limited NA value support. Floats are always
    fine, ints must always be floats if there are Null values.
    Booleans are hard because converting bool column with None replaces
    all Nones with false. Therefore only convert bool if there are no
    NA values.
    Datetimes should already be converted to np.datetime64 if supported,
    but here we also force conversion if required.
    """
    parse_dates = _process_parse_dates_argument(parse_dates)

    for sql_col in self.table.columns:
        col_name = sql_col.name
        try:
            df_col = self.frame[col_name]

            # Handle date parsing upfront; don't try to convert columns
            # twice
            if col_name in parse_dates:
                try:
                    # parse_dates may be a dict mapping column -> format;
                    # indexing a plain list raises TypeError instead.
                    fmt = parse_dates[col_name]
                except TypeError:
                    fmt = None
                self.frame[col_name] = _handle_date_column(df_col, format=fmt)
                continue

            # the type the dataframe column should have
            col_type = self._get_dtype(sql_col.type)

            if (
                col_type is datetime
                or col_type is date
                or col_type is DatetimeTZDtype
            ):
                # Convert tz-aware Datetime SQL columns to UTC
                utc = col_type is DatetimeTZDtype
                self.frame[col_name] = _handle_date_column(df_col, utc=utc)
            elif col_type is float:
                # floats support NA, can always convert!
                self.frame[col_name] = df_col.astype(col_type, copy=False)

            elif len(df_col) == df_col.count():
                # No NA values, can convert ints and bools
                if col_type is np.dtype("int64") or col_type is bool:
                    self.frame[col_name] = df_col.astype(col_type, copy=False)
        except KeyError:
            pass  # this column not in results
  792. def _sqlalchemy_type(self, col):
  793. dtype = self.dtype or {}
  794. if col.name in dtype:
  795. return self.dtype[col.name]
  796. # Infer type of column, while ignoring missing values.
  797. # Needed for inserting typed data containing NULLs, GH 8778.
  798. col_type = lib.infer_dtype(col, skipna=True)
  799. from sqlalchemy.types import (
  800. BigInteger,
  801. Integer,
  802. Float,
  803. Text,
  804. Boolean,
  805. DateTime,
  806. Date,
  807. Time,
  808. TIMESTAMP,
  809. )
  810. if col_type == "datetime64" or col_type == "datetime":
  811. # GH 9086: TIMESTAMP is the suggested type if the column contains
  812. # timezone information
  813. try:
  814. if col.dt.tz is not None:
  815. return TIMESTAMP(timezone=True)
  816. except AttributeError:
  817. # The column is actually a DatetimeIndex
  818. if col.tz is not None:
  819. return TIMESTAMP(timezone=True)
  820. return DateTime
  821. if col_type == "timedelta64":
  822. warnings.warn(
  823. "the 'timedelta' type is not supported, and will be "
  824. "written as integer values (ns frequency) to the "
  825. "database.",
  826. UserWarning,
  827. stacklevel=8,
  828. )
  829. return BigInteger
  830. elif col_type == "floating":
  831. if col.dtype == "float32":
  832. return Float(precision=23)
  833. else:
  834. return Float(precision=53)
  835. elif col_type == "integer":
  836. if col.dtype == "int32":
  837. return Integer
  838. else:
  839. return BigInteger
  840. elif col_type == "boolean":
  841. return Boolean
  842. elif col_type == "date":
  843. return Date
  844. elif col_type == "time":
  845. return Time
  846. elif col_type == "complex":
  847. raise ValueError("Complex datatypes not supported")
  848. return Text
  849. def _get_dtype(self, sqltype):
  850. from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date, TIMESTAMP
  851. if isinstance(sqltype, Float):
  852. return float
  853. elif isinstance(sqltype, Integer):
  854. # TODO: Refine integer size.
  855. return np.dtype("int64")
  856. elif isinstance(sqltype, TIMESTAMP):
  857. # we have a timezone capable type
  858. if not sqltype.timezone:
  859. return datetime
  860. return DatetimeTZDtype
  861. elif isinstance(sqltype, DateTime):
  862. # Caution: np.datetime64 is also a subclass of np.number.
  863. return datetime
  864. elif isinstance(sqltype, Date):
  865. return date
  866. elif isinstance(sqltype, Boolean):
  867. return bool
  868. return object
class PandasSQL(PandasObject):
    """
    Abstract base for the SQL backends.

    Subclasses should define read_sql and to_sql; the base
    implementations only raise, signalling that a concrete backend
    bound to a real connection is required.
    """

    def read_sql(self, *args, **kwargs):
        raise ValueError(
            "PandasSQL must be created with an SQLAlchemy "
            "connectable or sqlite connection"
        )

    def to_sql(self, *args, **kwargs):
        raise ValueError(
            "PandasSQL must be created with an SQLAlchemy "
            "connectable or sqlite connection"
        )
class SQLDatabase(PandasSQL):
    """
    This class enables conversion between DataFrame and SQL databases
    using SQLAlchemy to handle DataBase abstraction.

    Parameters
    ----------
    engine : SQLAlchemy connectable
        Connectable to connect with the database. Using SQLAlchemy makes it
        possible to use any DB supported by that library.
    schema : string, default None
        Name of SQL schema in database to write to (if database flavor
        supports this). If None, use default schema (default).
    meta : SQLAlchemy MetaData object, default None
        If provided, this MetaData object is used instead of a newly
        created. This allows to specify database flavor specific
        arguments in the MetaData object.
    """

    def __init__(self, engine, schema=None, meta=None):
        self.connectable = engine
        if not meta:
            from sqlalchemy.schema import MetaData

            meta = MetaData(self.connectable, schema=schema)

        self.meta = meta

    @contextmanager
    def run_transaction(self):
        """Yield an object with an execute() method inside a transaction."""
        with self.connectable.begin() as tx:
            # Some transaction objects expose execute() directly; if not,
            # fall back to the connectable itself.
            if hasattr(tx, "execute"):
                yield tx
            else:
                yield self.connectable

    def execute(self, *args, **kwargs):
        """Simple passthrough to SQLAlchemy connectable"""
        return self.connectable.execute(*args, **kwargs)

    def read_table(
        self,
        table_name,
        index_col=None,
        coerce_float=True,
        parse_dates=None,
        columns=None,
        schema=None,
        chunksize=None,
    ):
        """Read SQL database table into a DataFrame.

        Parameters
        ----------
        table_name : string
            Name of SQL table in database.
        index_col : string, optional, default: None
            Column to set as index.
        coerce_float : boolean, default True
            Attempts to convert values of non-string, non-numeric objects
            (like decimal.Decimal) to floating point. This can result in
            loss of precision.
        parse_dates : list or dict, default: None
            - List of column names to parse as dates.
            - Dict of ``{column_name: format string}`` where format string is
              strftime compatible in case of parsing string times, or is one of
              (D, s, ns, ms, us) in case of parsing integer timestamps.
            - Dict of ``{column_name: arg}``, where the arg corresponds
              to the keyword arguments of :func:`pandas.to_datetime`.
              Especially useful with databases without native Datetime support,
              such as SQLite.
        columns : list, default: None
            List of column names to select from SQL table.
        schema : string, default None
            Name of SQL schema in database to query (if database flavor
            supports this). If specified, this overwrites the default
            schema of the SQL database object.
        chunksize : int, default None
            If specified, return an iterator where `chunksize` is the number
            of rows to include in each chunk.

        Returns
        -------
        DataFrame

        See Also
        --------
        pandas.read_sql_table
        SQLDatabase.read_query
        """
        table = SQLTable(table_name, self, index=index_col, schema=schema)
        return table.read(
            coerce_float=coerce_float,
            parse_dates=parse_dates,
            columns=columns,
            chunksize=chunksize,
        )

    @staticmethod
    def _query_iterator(
        result, chunksize, columns, index_col=None, coerce_float=True, parse_dates=None
    ):
        """Return generator through chunked result set"""
        while True:
            # fetchmany returns an empty sequence once the cursor is drained.
            data = result.fetchmany(chunksize)
            if not data:
                break
            else:
                yield _wrap_result(
                    data,
                    columns,
                    index_col=index_col,
                    coerce_float=coerce_float,
                    parse_dates=parse_dates,
                )

    def read_query(
        self,
        sql,
        index_col=None,
        coerce_float=True,
        parse_dates=None,
        params=None,
        chunksize=None,
    ):
        """Read SQL query into a DataFrame.

        Parameters
        ----------
        sql : string
            SQL query to be executed.
        index_col : string, optional, default: None
            Column name to use as index for the returned DataFrame object.
        coerce_float : boolean, default True
            Attempt to convert values of non-string, non-numeric objects (like
            decimal.Decimal) to floating point, useful for SQL result sets.
        params : list, tuple or dict, optional, default: None
            List of parameters to pass to execute method. The syntax used
            to pass parameters is database driver dependent. Check your
            database driver documentation for which of the five syntax styles,
            described in PEP 249's paramstyle, is supported.
            Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
        parse_dates : list or dict, default: None
            - List of column names to parse as dates.
            - Dict of ``{column_name: format string}`` where format string is
              strftime compatible in case of parsing string times, or is one of
              (D, s, ns, ms, us) in case of parsing integer timestamps.
            - Dict of ``{column_name: arg dict}``, where the arg dict
              corresponds to the keyword arguments of
              :func:`pandas.to_datetime` Especially useful with databases
              without native Datetime support, such as SQLite.
        chunksize : int, default None
            If specified, return an iterator where `chunksize` is the number
            of rows to include in each chunk.

        Returns
        -------
        DataFrame

        See Also
        --------
        read_sql_table : Read SQL database table into a DataFrame.
        read_sql
        """
        args = _convert_params(sql, params)

        result = self.execute(*args)
        columns = result.keys()

        if chunksize is not None:
            return self._query_iterator(
                result,
                chunksize,
                columns,
                index_col=index_col,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
            )
        else:
            data = result.fetchall()
            frame = _wrap_result(
                data,
                columns,
                index_col=index_col,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
            )
            return frame

    # read_sql is an alias kept for the PandasSQL interface.
    read_sql = read_query

    def to_sql(
        self,
        frame,
        name,
        if_exists="fail",
        index=True,
        index_label=None,
        schema=None,
        chunksize=None,
        dtype=None,
        method=None,
    ):
        """
        Write records stored in a DataFrame to a SQL database.

        Parameters
        ----------
        frame : DataFrame
        name : string
            Name of SQL table.
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, do nothing.
            - replace: If table exists, drop it, recreate it, and insert data.
            - append: If table exists, insert data. Create if does not exist.
        index : boolean, default True
            Write DataFrame index as a column.
        index_label : string or sequence, default None
            Column label for index column(s). If None is given (default) and
            `index` is True, then the index names are used.
            A sequence should be given if the DataFrame uses MultiIndex.
        schema : string, default None
            Name of SQL schema in database to write to (if database flavor
            supports this). If specified, this overwrites the default
            schema of the SQLDatabase object.
        chunksize : int, default None
            If not None, then rows will be written in batches of this size at a
            time. If None, all rows will be written at once.
        dtype : single type or dict of column name to SQL type, default None
            Optional specifying the datatype for columns. The SQL type should
            be a SQLAlchemy type. If all columns are of the same type, one
            single value can be used.
        method : {None, 'multi', callable}, default None
            Controls the SQL insertion clause used:

            * None : Uses standard SQL ``INSERT`` clause (one per row).
            * 'multi': Pass multiple values in a single ``INSERT`` clause.
            * callable with signature ``(pd_table, conn, keys, data_iter)``.

            Details and a sample callable implementation can be found in the
            section :ref:`insert method <io.sql.method>`.

            .. versionadded:: 0.24.0
        """
        # A scalar dtype applies to every column.
        if dtype and not is_dict_like(dtype):
            dtype = {col_name: dtype for col_name in frame}

        if dtype is not None:
            from sqlalchemy.types import to_instance, TypeEngine

            for col, my_type in dtype.items():
                if not isinstance(to_instance(my_type), TypeEngine):
                    raise ValueError(f"The type of {col} is not a SQLAlchemy type")

        table = SQLTable(
            name,
            self,
            frame=frame,
            index=index,
            if_exists=if_exists,
            index_label=index_label,
            schema=schema,
            dtype=dtype,
        )
        table.create()
        table.insert(chunksize, method=method)
        if not name.isdigit() and not name.islower():
            # check for potentially case sensitivity issues (GH7815)
            # Only check when name is not a number and name is not lower case
            engine = self.connectable.engine
            with self.connectable.connect() as conn:
                table_names = engine.table_names(
                    schema=schema or self.meta.schema, connection=conn
                )
            if name not in table_names:
                msg = (
                    f"The provided table name '{name}' is not found exactly as "
                    "such in the database after writing the table, possibly "
                    "due to case sensitivity issues. Consider using lower "
                    "case table names."
                )
                warnings.warn(msg, UserWarning)

    @property
    def tables(self):
        # Tables known to the bound MetaData object.
        return self.meta.tables

    def has_table(self, name, schema=None):
        return self.connectable.run_callable(
            self.connectable.dialect.has_table, name, schema or self.meta.schema
        )

    def get_table(self, table_name, schema=None):
        schema = schema or self.meta.schema
        if schema:
            tbl = self.meta.tables.get(".".join([schema, table_name]))
        else:
            tbl = self.meta.tables.get(table_name)

        # Avoid casting double-precision floats into decimals
        from sqlalchemy import Numeric

        for column in tbl.columns:
            if isinstance(column.type, Numeric):
                column.type.asdecimal = False

        return tbl

    def drop_table(self, table_name, schema=None):
        schema = schema or self.meta.schema
        if self.has_table(table_name, schema):
            # Reflect the table into self.meta so it can be dropped, then
            # clear the metadata cache afterwards.
            self.meta.reflect(only=[table_name], schema=schema)
            self.get_table(table_name, schema).drop()
            self.meta.clear()

    def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
        # Build a throwaway SQLTable just to render its CREATE statement.
        table = SQLTable(
            table_name, self, frame=frame, index=False, keys=keys, dtype=dtype
        )
        return str(table.sql_schema())
# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes

# Maps pandas-inferred dtype names to SQLite storage classes. Booleans
# are stored as INTEGER because SQLite has no native boolean type.
_SQL_TYPES = {
    "string": "TEXT",
    "floating": "REAL",
    "integer": "INTEGER",
    "datetime": "TIMESTAMP",
    "date": "DATE",
    "time": "TIME",
    "boolean": "INTEGER",
}
  1181. def _get_unicode_name(name):
  1182. try:
  1183. uname = str(name).encode("utf-8", "strict").decode("utf-8")
  1184. except UnicodeError:
  1185. raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'")
  1186. return uname
  1187. def _get_valid_sqlite_name(name):
  1188. # See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
  1189. # -for-sqlite-table-column-names-in-python
  1190. # Ensure the string can be encoded as UTF-8.
  1191. # Ensure the string does not include any NUL characters.
  1192. # Replace all " with "".
  1193. # Wrap the entire thing in double quotes.
  1194. uname = _get_unicode_name(name)
  1195. if not len(uname):
  1196. raise ValueError("Empty table or column name specified")
  1197. nul_index = uname.find("\x00")
  1198. if nul_index >= 0:
  1199. raise ValueError("SQLite identifier cannot contain NULs")
  1200. return '"' + uname.replace('"', '""') + '"'
# Warning emitted when column names contain whitespace; retained for
# backwards compatibility with pre-0.14 behavior that rewrote spaces.
_SAFE_NAMES_WARNING = (
    "The spaces in these column names will not be changed. "
    "In pandas versions < 0.14, spaces were converted to "
    "underscores."
)
  1206. class SQLiteTable(SQLTable):
  1207. """
  1208. Patch the SQLTable for fallback support.
  1209. Instead of a table variable just use the Create Table statement.
  1210. """
  1211. def __init__(self, *args, **kwargs):
  1212. # GH 8341
  1213. # register an adapter callable for datetime.time object
  1214. import sqlite3
  1215. # this will transform time(12,34,56,789) into '12:34:56.000789'
  1216. # (this is what sqlalchemy does)
  1217. sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
  1218. super().__init__(*args, **kwargs)
  1219. def sql_schema(self):
  1220. return str(";\n".join(self.table))
  1221. def _execute_create(self):
  1222. with self.pd_sql.run_transaction() as conn:
  1223. for stmt in self.table:
  1224. conn.execute(stmt)
  1225. def insert_statement(self):
  1226. names = list(map(str, self.frame.columns))
  1227. wld = "?" # wildcard char
  1228. escape = _get_valid_sqlite_name
  1229. if self.index is not None:
  1230. for idx in self.index[::-1]:
  1231. names.insert(0, idx)
  1232. bracketed_names = [escape(column) for column in names]
  1233. col_names = ",".join(bracketed_names)
  1234. wildcards = ",".join([wld] * len(names))
  1235. insert_statement = (
  1236. f"INSERT INTO {escape(self.name)} ({col_names}) VALUES ({wildcards})"
  1237. )
  1238. return insert_statement
  1239. def _execute_insert(self, conn, keys, data_iter):
  1240. data_list = list(data_iter)
  1241. conn.executemany(self.insert_statement(), data_list)
  1242. def _create_table_setup(self):
  1243. """
  1244. Return a list of SQL statements that creates a table reflecting the
  1245. structure of a DataFrame. The first entry will be a CREATE TABLE
  1246. statement while the rest will be CREATE INDEX statements.
  1247. """
  1248. column_names_and_types = self._get_column_names_and_types(self._sql_type_name)
  1249. pat = re.compile(r"\s+")
  1250. column_names = [col_name for col_name, _, _ in column_names_and_types]
  1251. if any(map(pat.search, column_names)):
  1252. warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
  1253. escape = _get_valid_sqlite_name
  1254. create_tbl_stmts = [
  1255. escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types
  1256. ]
  1257. if self.keys is not None and len(self.keys):
  1258. if not is_list_like(self.keys):
  1259. keys = [self.keys]
  1260. else:
  1261. keys = self.keys
  1262. cnames_br = ", ".join(escape(c) for c in keys)
  1263. create_tbl_stmts.append(
  1264. f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})"
  1265. )
  1266. create_stmts = [
  1267. "CREATE TABLE "
  1268. + escape(self.name)
  1269. + " (\n"
  1270. + ",\n ".join(create_tbl_stmts)
  1271. + "\n)"
  1272. ]
  1273. ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index]
  1274. if len(ix_cols):
  1275. cnames = "_".join(ix_cols)
  1276. cnames_br = ",".join(escape(c) for c in ix_cols)
  1277. create_stmts.append(
  1278. "CREATE INDEX "
  1279. + escape("ix_" + self.name + "_" + cnames)
  1280. + "ON "
  1281. + escape(self.name)
  1282. + " ("
  1283. + cnames_br
  1284. + ")"
  1285. )
  1286. return create_stmts
  1287. def _sql_type_name(self, col):
  1288. dtype = self.dtype or {}
  1289. if col.name in dtype:
  1290. return dtype[col.name]
  1291. # Infer type of column, while ignoring missing values.
  1292. # Needed for inserting typed data containing NULLs, GH 8778.
  1293. col_type = lib.infer_dtype(col, skipna=True)
  1294. if col_type == "timedelta64":
  1295. warnings.warn(
  1296. "the 'timedelta' type is not supported, and will be "
  1297. "written as integer values (ns frequency) to the "
  1298. "database.",
  1299. UserWarning,
  1300. stacklevel=8,
  1301. )
  1302. col_type = "integer"
  1303. elif col_type == "datetime64":
  1304. col_type = "datetime"
  1305. elif col_type == "empty":
  1306. col_type = "string"
  1307. elif col_type == "complex":
  1308. raise ValueError("Complex datatypes not supported")
  1309. if col_type not in _SQL_TYPES:
  1310. col_type = "string"
  1311. return _SQL_TYPES[col_type]
  1312. class SQLiteDatabase(PandasSQL):
  1313. """
  1314. Version of SQLDatabase to support SQLite connections (fallback without
  1315. SQLAlchemy). This should only be used internally.
  1316. Parameters
  1317. ----------
  1318. con : sqlite connection object
  1319. """
  1320. def __init__(self, con, is_cursor=False):
  1321. self.is_cursor = is_cursor
  1322. self.con = con
  1323. @contextmanager
  1324. def run_transaction(self):
  1325. cur = self.con.cursor()
  1326. try:
  1327. yield cur
  1328. self.con.commit()
  1329. except Exception:
  1330. self.con.rollback()
  1331. raise
  1332. finally:
  1333. cur.close()
  1334. def execute(self, *args, **kwargs):
  1335. if self.is_cursor:
  1336. cur = self.con
  1337. else:
  1338. cur = self.con.cursor()
  1339. try:
  1340. cur.execute(*args, **kwargs)
  1341. return cur
  1342. except Exception as exc:
  1343. try:
  1344. self.con.rollback()
  1345. except Exception as inner_exc: # pragma: no cover
  1346. ex = DatabaseError(
  1347. f"Execution failed on sql: {args[0]}\n{exc}\nunable to rollback"
  1348. )
  1349. raise ex from inner_exc
  1350. ex = DatabaseError(f"Execution failed on sql '{args[0]}': {exc}")
  1351. raise ex from exc
  1352. @staticmethod
  1353. def _query_iterator(
  1354. cursor, chunksize, columns, index_col=None, coerce_float=True, parse_dates=None
  1355. ):
  1356. """Return generator through chunked result set"""
  1357. while True:
  1358. data = cursor.fetchmany(chunksize)
  1359. if type(data) == tuple:
  1360. data = list(data)
  1361. if not data:
  1362. cursor.close()
  1363. break
  1364. else:
  1365. yield _wrap_result(
  1366. data,
  1367. columns,
  1368. index_col=index_col,
  1369. coerce_float=coerce_float,
  1370. parse_dates=parse_dates,
  1371. )
  1372. def read_query(
  1373. self,
  1374. sql,
  1375. index_col=None,
  1376. coerce_float=True,
  1377. params=None,
  1378. parse_dates=None,
  1379. chunksize=None,
  1380. ):
  1381. args = _convert_params(sql, params)
  1382. cursor = self.execute(*args)
  1383. columns = [col_desc[0] for col_desc in cursor.description]
  1384. if chunksize is not None:
  1385. return self._query_iterator(
  1386. cursor,
  1387. chunksize,
  1388. columns,
  1389. index_col=index_col,
  1390. coerce_float=coerce_float,
  1391. parse_dates=parse_dates,
  1392. )
  1393. else:
  1394. data = self._fetchall_as_list(cursor)
  1395. cursor.close()
  1396. frame = _wrap_result(
  1397. data,
  1398. columns,
  1399. index_col=index_col,
  1400. coerce_float=coerce_float,
  1401. parse_dates=parse_dates,
  1402. )
  1403. return frame
  1404. def _fetchall_as_list(self, cur):
  1405. result = cur.fetchall()
  1406. if not isinstance(result, list):
  1407. result = list(result)
  1408. return result
  1409. def to_sql(
  1410. self,
  1411. frame,
  1412. name,
  1413. if_exists="fail",
  1414. index=True,
  1415. index_label=None,
  1416. schema=None,
  1417. chunksize=None,
  1418. dtype=None,
  1419. method=None,
  1420. ):
  1421. """
  1422. Write records stored in a DataFrame to a SQL database.
  1423. Parameters
  1424. ----------
  1425. frame: DataFrame
  1426. name: string
  1427. Name of SQL table.
  1428. if_exists: {'fail', 'replace', 'append'}, default 'fail'
  1429. fail: If table exists, do nothing.
  1430. replace: If table exists, drop it, recreate it, and insert data.
  1431. append: If table exists, insert data. Create if it does not exist.
  1432. index : boolean, default True
  1433. Write DataFrame index as a column
  1434. index_label : string or sequence, default None
  1435. Column label for index column(s). If None is given (default) and
  1436. `index` is True, then the index names are used.
  1437. A sequence should be given if the DataFrame uses MultiIndex.
  1438. schema : string, default None
  1439. Ignored parameter included for compatibility with SQLAlchemy
  1440. version of ``to_sql``.
  1441. chunksize : int, default None
  1442. If not None, then rows will be written in batches of this
  1443. size at a time. If None, all rows will be written at once.
  1444. dtype : single type or dict of column name to SQL type, default None
  1445. Optional specifying the datatype for columns. The SQL type should
  1446. be a string. If all columns are of the same type, one single value
  1447. can be used.
  1448. method : {None, 'multi', callable}, default None
  1449. Controls the SQL insertion clause used:
  1450. * None : Uses standard SQL ``INSERT`` clause (one per row).
  1451. * 'multi': Pass multiple values in a single ``INSERT`` clause.
  1452. * callable with signature ``(pd_table, conn, keys, data_iter)``.
  1453. Details and a sample callable implementation can be found in the
  1454. section :ref:`insert method <io.sql.method>`.
  1455. .. versionadded:: 0.24.0
  1456. """
  1457. if dtype and not is_dict_like(dtype):
  1458. dtype = {col_name: dtype for col_name in frame}
  1459. if dtype is not None:
  1460. for col, my_type in dtype.items():
  1461. if not isinstance(my_type, str):
  1462. raise ValueError(f"{col} ({my_type}) not a string")
  1463. table = SQLiteTable(
  1464. name,
  1465. self,
  1466. frame=frame,
  1467. index=index,
  1468. if_exists=if_exists,
  1469. index_label=index_label,
  1470. dtype=dtype,
  1471. )
  1472. table.create()
  1473. table.insert(chunksize, method)
  1474. def has_table(self, name, schema=None):
  1475. # TODO(wesm): unused?
  1476. # escape = _get_valid_sqlite_name
  1477. # esc_name = escape(name)
  1478. wld = "?"
  1479. query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"
  1480. return len(self.execute(query, [name]).fetchall()) > 0
  1481. def get_table(self, table_name, schema=None):
  1482. return None # not supported in fallback mode
  1483. def drop_table(self, name, schema=None):
  1484. drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
  1485. self.execute(drop_sql)
  1486. def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
  1487. table = SQLiteTable(
  1488. table_name, self, frame=frame, index=False, keys=keys, dtype=dtype
  1489. )
  1490. return str(table.sql_schema())
  1491. def get_schema(frame, name, keys=None, con=None, dtype=None):
  1492. """
  1493. Get the SQL db table schema for the given frame.
  1494. Parameters
  1495. ----------
  1496. frame : DataFrame
  1497. name : string
  1498. name of SQL table
  1499. keys : string or sequence, default: None
  1500. columns to use a primary key
  1501. con: an open SQL database connection object or a SQLAlchemy connectable
  1502. Using SQLAlchemy makes it possible to use any DB supported by that
  1503. library, default: None
  1504. If a DBAPI2 object, only sqlite3 is supported.
  1505. dtype : dict of column name to SQL type, default None
  1506. Optional specifying the datatype for columns. The SQL type should
  1507. be a SQLAlchemy type, or a string for sqlite3 fallback connection.
  1508. """
  1509. pandas_sql = pandasSQL_builder(con=con)
  1510. return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)