  1. """
  2. Collection of query wrappers / abstractions to both facilitate data
  3. retrieval and to reduce dependency on DB-specific API.
  4. """
  5. from __future__ import annotations
  6. from abc import (
  7. ABC,
  8. abstractmethod,
  9. )
  10. from contextlib import (
  11. ExitStack,
  12. contextmanager,
  13. )
  14. from datetime import (
  15. date,
  16. datetime,
  17. time,
  18. )
  19. from functools import partial
  20. import re
  21. from typing import (
  22. TYPE_CHECKING,
  23. Any,
  24. Iterator,
  25. Literal,
  26. cast,
  27. overload,
  28. )
  29. import warnings
  30. import numpy as np
  31. from pandas._libs import lib
  32. from pandas._typing import (
  33. DateTimeErrorChoices,
  34. DtypeArg,
  35. DtypeBackend,
  36. IndexLabel,
  37. )
  38. from pandas.compat._optional import import_optional_dependency
  39. from pandas.errors import (
  40. AbstractMethodError,
  41. DatabaseError,
  42. )
  43. from pandas.util._exceptions import find_stack_level
  44. from pandas.util._validators import check_dtype_backend
  45. from pandas.core.dtypes.common import (
  46. is_datetime64tz_dtype,
  47. is_dict_like,
  48. is_integer,
  49. is_list_like,
  50. )
  51. from pandas.core.dtypes.dtypes import DatetimeTZDtype
  52. from pandas.core.dtypes.missing import isna
  53. from pandas import get_option
  54. from pandas.core.api import (
  55. DataFrame,
  56. Series,
  57. )
  58. from pandas.core.arrays import ArrowExtensionArray
  59. from pandas.core.base import PandasObject
  60. import pandas.core.common as com
  61. from pandas.core.internals.construction import convert_object_array
  62. from pandas.core.tools.datetimes import to_datetime
  63. if TYPE_CHECKING:
  64. from sqlalchemy import Table
  65. from sqlalchemy.sql.expression import (
  66. Select,
  67. TextClause,
  68. )
  69. # -----------------------------------------------------------------------------
  70. # -- Helper functions
  71. def _process_parse_dates_argument(parse_dates):
  72. """Process parse_dates argument for read_sql functions"""
  73. # handle non-list entries for parse_dates gracefully
  74. if parse_dates is True or parse_dates is None or parse_dates is False:
  75. parse_dates = []
  76. elif not hasattr(parse_dates, "__iter__"):
  77. parse_dates = [parse_dates]
  78. return parse_dates
  79. def _handle_date_column(
  80. col, utc: bool = False, format: str | dict[str, Any] | None = None
  81. ):
  82. if isinstance(format, dict):
  83. # GH35185 Allow custom error values in parse_dates argument of
  84. # read_sql like functions.
  85. # Format can take on custom to_datetime argument values such as
  86. # {"errors": "coerce"} or {"dayfirst": True}
  87. error: DateTimeErrorChoices = format.pop("errors", None) or "ignore"
  88. return to_datetime(col, errors=error, **format)
  89. else:
  90. # Allow passing of formatting string for integers
  91. # GH17855
  92. if format is None and (
  93. issubclass(col.dtype.type, np.floating)
  94. or issubclass(col.dtype.type, np.integer)
  95. ):
  96. format = "s"
  97. if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]:
  98. return to_datetime(col, errors="coerce", unit=format, utc=utc)
  99. elif is_datetime64tz_dtype(col.dtype):
  100. # coerce to UTC timezone
  101. # GH11216
  102. return to_datetime(col, utc=True)
  103. else:
  104. return to_datetime(col, errors="coerce", format=format, utc=utc)
  105. def _parse_date_columns(data_frame, parse_dates):
  106. """
  107. Force non-datetime columns to be read as such.
  108. Supports both string formatted and integer timestamp columns.
  109. """
  110. parse_dates = _process_parse_dates_argument(parse_dates)
  111. # we want to coerce datetime64_tz dtypes for now to UTC
  112. # we could in theory do a 'nice' conversion from a FixedOffset tz
  113. # GH11216
  114. for col_name, df_col in data_frame.items():
  115. if is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates:
  116. try:
  117. fmt = parse_dates[col_name]
  118. except TypeError:
  119. fmt = None
  120. data_frame[col_name] = _handle_date_column(df_col, format=fmt)
  121. return data_frame
  122. def _convert_arrays_to_dataframe(
  123. data,
  124. columns,
  125. coerce_float: bool = True,
  126. dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
  127. ) -> DataFrame:
  128. content = lib.to_object_array_tuples(data)
  129. arrays = convert_object_array(
  130. list(content.T),
  131. dtype=None,
  132. coerce_float=coerce_float,
  133. dtype_backend=dtype_backend,
  134. )
  135. if dtype_backend == "pyarrow":
  136. pa = import_optional_dependency("pyarrow")
  137. arrays = [
  138. ArrowExtensionArray(pa.array(arr, from_pandas=True)) for arr in arrays
  139. ]
  140. if arrays:
  141. df = DataFrame(dict(zip(list(range(len(columns))), arrays)))
  142. df.columns = columns
  143. return df
  144. else:
  145. return DataFrame(columns=columns)
  146. def _wrap_result(
  147. data,
  148. columns,
  149. index_col=None,
  150. coerce_float: bool = True,
  151. parse_dates=None,
  152. dtype: DtypeArg | None = None,
  153. dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
  154. ):
  155. """Wrap result set of query in a DataFrame."""
  156. frame = _convert_arrays_to_dataframe(data, columns, coerce_float, dtype_backend)
  157. if dtype:
  158. frame = frame.astype(dtype)
  159. frame = _parse_date_columns(frame, parse_dates)
  160. if index_col is not None:
  161. frame = frame.set_index(index_col)
  162. return frame
  163. def execute(sql, con, params=None):
  164. """
  165. Execute the given SQL query using the provided connection object.
  166. Parameters
  167. ----------
  168. sql : string
  169. SQL query to be executed.
  170. con : SQLAlchemy connection or sqlite3 connection
  171. If a DBAPI2 object, only sqlite3 is supported.
  172. params : list or tuple, optional, default: None
  173. List of parameters to pass to execute method.
  174. Returns
  175. -------
  176. Results Iterable
  177. """
  178. warnings.warn(
  179. "`pandas.io.sql.execute` is deprecated and "
  180. "will be removed in the future version.",
  181. FutureWarning,
  182. stacklevel=find_stack_level(),
  183. ) # GH50185
  184. sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore")
  185. if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Engine)):
  186. raise TypeError("pandas.io.sql.execute requires a connection") # GH50185
  187. with pandasSQL_builder(con, need_transaction=True) as pandas_sql:
  188. return pandas_sql.execute(sql, params)
  189. # -----------------------------------------------------------------------------
  190. # -- Read and write to DataFrames
# The two overloads below exist only to narrow the return type for type
# checkers: chunksize=None yields a single DataFrame, an int chunksize
# yields an iterator of DataFrames.
@overload
def read_sql_table(
    table_name,
    con,
    schema=...,
    index_col: str | list[str] | None = ...,
    coerce_float=...,
    parse_dates: list[str] | dict[str, str] | None = ...,
    columns: list[str] | None = ...,
    chunksize: None = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> DataFrame:
    ...


@overload
def read_sql_table(
    table_name,
    con,
    schema=...,
    index_col: str | list[str] | None = ...,
    coerce_float=...,
    parse_dates: list[str] | dict[str, str] | None = ...,
    columns: list[str] | None = ...,
    chunksize: int = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> Iterator[DataFrame]:
    ...
  217. def read_sql_table(
  218. table_name: str,
  219. con,
  220. schema: str | None = None,
  221. index_col: str | list[str] | None = None,
  222. coerce_float: bool = True,
  223. parse_dates: list[str] | dict[str, str] | None = None,
  224. columns: list[str] | None = None,
  225. chunksize: int | None = None,
  226. dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
  227. ) -> DataFrame | Iterator[DataFrame]:
  228. """
  229. Read SQL database table into a DataFrame.
  230. Given a table name and a SQLAlchemy connectable, returns a DataFrame.
  231. This function does not support DBAPI connections.
  232. Parameters
  233. ----------
  234. table_name : str
  235. Name of SQL table in database.
  236. con : SQLAlchemy connectable or str
  237. A database URI could be provided as str.
  238. SQLite DBAPI connection mode not supported.
  239. schema : str, default None
  240. Name of SQL schema in database to query (if database flavor
  241. supports this). Uses default schema if None (default).
  242. index_col : str or list of str, optional, default: None
  243. Column(s) to set as index(MultiIndex).
  244. coerce_float : bool, default True
  245. Attempts to convert values of non-string, non-numeric objects (like
  246. decimal.Decimal) to floating point. Can result in loss of Precision.
  247. parse_dates : list or dict, default None
  248. - List of column names to parse as dates.
  249. - Dict of ``{column_name: format string}`` where format string is
  250. strftime compatible in case of parsing string times or is one of
  251. (D, s, ns, ms, us) in case of parsing integer timestamps.
  252. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
  253. to the keyword arguments of :func:`pandas.to_datetime`
  254. Especially useful with databases without native Datetime support,
  255. such as SQLite.
  256. columns : list, default None
  257. List of column names to select from SQL table.
  258. chunksize : int, default None
  259. If specified, returns an iterator where `chunksize` is the number of
  260. rows to include in each chunk.
  261. dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames
  262. Which dtype_backend to use, e.g. whether a DataFrame should have NumPy
  263. arrays, nullable dtypes are used for all dtypes that have a nullable
  264. implementation when "numpy_nullable" is set, pyarrow is used for all
  265. dtypes if "pyarrow" is set.
  266. The dtype_backends are still experimential.
  267. .. versionadded:: 2.0
  268. Returns
  269. -------
  270. DataFrame or Iterator[DataFrame]
  271. A SQL table is returned as two-dimensional data structure with labeled
  272. axes.
  273. See Also
  274. --------
  275. read_sql_query : Read SQL query into a DataFrame.
  276. read_sql : Read SQL query or database table into a DataFrame.
  277. Notes
  278. -----
  279. Any datetime values with time zone information will be converted to UTC.
  280. Examples
  281. --------
  282. >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
  283. """
  284. check_dtype_backend(dtype_backend)
  285. if dtype_backend is lib.no_default:
  286. dtype_backend = "numpy" # type: ignore[assignment]
  287. with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql:
  288. if not pandas_sql.has_table(table_name):
  289. raise ValueError(f"Table {table_name} not found")
  290. table = pandas_sql.read_table(
  291. table_name,
  292. index_col=index_col,
  293. coerce_float=coerce_float,
  294. parse_dates=parse_dates,
  295. columns=columns,
  296. chunksize=chunksize,
  297. dtype_backend=dtype_backend,
  298. )
  299. if table is not None:
  300. return table
  301. else:
  302. raise ValueError(f"Table {table_name} not found", con)
# The two overloads below exist only to narrow the return type for type
# checkers: chunksize=None yields a single DataFrame, an int chunksize
# yields an iterator of DataFrames.
@overload
def read_sql_query(
    sql,
    con,
    index_col: str | list[str] | None = ...,
    coerce_float=...,
    params: list[str] | dict[str, str] | None = ...,
    parse_dates: list[str] | dict[str, str] | None = ...,
    chunksize: None = ...,
    dtype: DtypeArg | None = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> DataFrame:
    ...


@overload
def read_sql_query(
    sql,
    con,
    index_col: str | list[str] | None = ...,
    coerce_float=...,
    params: list[str] | dict[str, str] | None = ...,
    parse_dates: list[str] | dict[str, str] | None = ...,
    chunksize: int = ...,
    dtype: DtypeArg | None = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> Iterator[DataFrame]:
    ...
  329. def read_sql_query(
  330. sql,
  331. con,
  332. index_col: str | list[str] | None = None,
  333. coerce_float: bool = True,
  334. params: list[str] | dict[str, str] | None = None,
  335. parse_dates: list[str] | dict[str, str] | None = None,
  336. chunksize: int | None = None,
  337. dtype: DtypeArg | None = None,
  338. dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
  339. ) -> DataFrame | Iterator[DataFrame]:
  340. """
  341. Read SQL query into a DataFrame.
  342. Returns a DataFrame corresponding to the result set of the query
  343. string. Optionally provide an `index_col` parameter to use one of the
  344. columns as the index, otherwise default integer index will be used.
  345. Parameters
  346. ----------
  347. sql : str SQL query or SQLAlchemy Selectable (select or text object)
  348. SQL query to be executed.
  349. con : SQLAlchemy connectable, str, or sqlite3 connection
  350. Using SQLAlchemy makes it possible to use any DB supported by that
  351. library. If a DBAPI2 object, only sqlite3 is supported.
  352. index_col : str or list of str, optional, default: None
  353. Column(s) to set as index(MultiIndex).
  354. coerce_float : bool, default True
  355. Attempts to convert values of non-string, non-numeric objects (like
  356. decimal.Decimal) to floating point. Useful for SQL result sets.
  357. params : list, tuple or dict, optional, default: None
  358. List of parameters to pass to execute method. The syntax used
  359. to pass parameters is database driver dependent. Check your
  360. database driver documentation for which of the five syntax styles,
  361. described in PEP 249's paramstyle, is supported.
  362. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
  363. parse_dates : list or dict, default: None
  364. - List of column names to parse as dates.
  365. - Dict of ``{column_name: format string}`` where format string is
  366. strftime compatible in case of parsing string times, or is one of
  367. (D, s, ns, ms, us) in case of parsing integer timestamps.
  368. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
  369. to the keyword arguments of :func:`pandas.to_datetime`
  370. Especially useful with databases without native Datetime support,
  371. such as SQLite.
  372. chunksize : int, default None
  373. If specified, return an iterator where `chunksize` is the number of
  374. rows to include in each chunk.
  375. dtype : Type name or dict of columns
  376. Data type for data or columns. E.g. np.float64 or
  377. {‘a’: np.float64, ‘b’: np.int32, ‘c’: ‘Int64’}.
  378. .. versionadded:: 1.3.0
  379. dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames
  380. Which dtype_backend to use, e.g. whether a DataFrame should have NumPy
  381. arrays, nullable dtypes are used for all dtypes that have a nullable
  382. implementation when "numpy_nullable" is set, pyarrow is used for all
  383. dtypes if "pyarrow" is set.
  384. The dtype_backends are still experimential.
  385. .. versionadded:: 2.0
  386. Returns
  387. -------
  388. DataFrame or Iterator[DataFrame]
  389. See Also
  390. --------
  391. read_sql_table : Read SQL database table into a DataFrame.
  392. read_sql : Read SQL query or database table into a DataFrame.
  393. Notes
  394. -----
  395. Any datetime values with time zone information parsed via the `parse_dates`
  396. parameter will be converted to UTC.
  397. """
  398. check_dtype_backend(dtype_backend)
  399. if dtype_backend is lib.no_default:
  400. dtype_backend = "numpy" # type: ignore[assignment]
  401. with pandasSQL_builder(con) as pandas_sql:
  402. return pandas_sql.read_query(
  403. sql,
  404. index_col=index_col,
  405. params=params,
  406. coerce_float=coerce_float,
  407. parse_dates=parse_dates,
  408. chunksize=chunksize,
  409. dtype=dtype,
  410. dtype_backend=dtype_backend,
  411. )
  412. @overload
  413. def read_sql(
  414. sql,
  415. con,
  416. index_col: str | list[str] | None = ...,
  417. coerce_float=...,
  418. params=...,
  419. parse_dates=...,
  420. columns: list[str] = ...,
  421. chunksize: None = ...,
  422. dtype_backend: DtypeBackend | lib.NoDefault = ...,
  423. dtype: DtypeArg | None = None,
  424. ) -> DataFrame:
  425. ...
  426. @overload
  427. def read_sql(
  428. sql,
  429. con,
  430. index_col: str | list[str] | None = ...,
  431. coerce_float=...,
  432. params=...,
  433. parse_dates=...,
  434. columns: list[str] = ...,
  435. chunksize: int = ...,
  436. dtype_backend: DtypeBackend | lib.NoDefault = ...,
  437. dtype: DtypeArg | None = None,
  438. ) -> Iterator[DataFrame]:
  439. ...
  440. def read_sql(
  441. sql,
  442. con,
  443. index_col: str | list[str] | None = None,
  444. coerce_float: bool = True,
  445. params=None,
  446. parse_dates=None,
  447. columns: list[str] | None = None,
  448. chunksize: int | None = None,
  449. dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
  450. dtype: DtypeArg | None = None,
  451. ) -> DataFrame | Iterator[DataFrame]:
  452. """
  453. Read SQL query or database table into a DataFrame.
  454. This function is a convenience wrapper around ``read_sql_table`` and
  455. ``read_sql_query`` (for backward compatibility). It will delegate
  456. to the specific function depending on the provided input. A SQL query
  457. will be routed to ``read_sql_query``, while a database table name will
  458. be routed to ``read_sql_table``. Note that the delegated function might
  459. have more specific notes about their functionality not listed here.
  460. Parameters
  461. ----------
  462. sql : str or SQLAlchemy Selectable (select or text object)
  463. SQL query to be executed or a table name.
  464. con : SQLAlchemy connectable, str, or sqlite3 connection
  465. Using SQLAlchemy makes it possible to use any DB supported by that
  466. library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
  467. for engine disposal and connection closure for the SQLAlchemy connectable; str
  468. connections are closed automatically. See
  469. `here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
  470. index_col : str or list of str, optional, default: None
  471. Column(s) to set as index(MultiIndex).
  472. coerce_float : bool, default True
  473. Attempts to convert values of non-string, non-numeric objects (like
  474. decimal.Decimal) to floating point, useful for SQL result sets.
  475. params : list, tuple or dict, optional, default: None
  476. List of parameters to pass to execute method. The syntax used
  477. to pass parameters is database driver dependent. Check your
  478. database driver documentation for which of the five syntax styles,
  479. described in PEP 249's paramstyle, is supported.
  480. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
  481. parse_dates : list or dict, default: None
  482. - List of column names to parse as dates.
  483. - Dict of ``{column_name: format string}`` where format string is
  484. strftime compatible in case of parsing string times, or is one of
  485. (D, s, ns, ms, us) in case of parsing integer timestamps.
  486. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
  487. to the keyword arguments of :func:`pandas.to_datetime`
  488. Especially useful with databases without native Datetime support,
  489. such as SQLite.
  490. columns : list, default: None
  491. List of column names to select from SQL table (only used when reading
  492. a table).
  493. chunksize : int, default None
  494. If specified, return an iterator where `chunksize` is the
  495. number of rows to include in each chunk.
  496. dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames
  497. Which dtype_backend to use, e.g. whether a DataFrame should have NumPy
  498. arrays, nullable dtypes are used for all dtypes that have a nullable
  499. implementation when "numpy_nullable" is set, pyarrow is used for all
  500. dtypes if "pyarrow" is set.
  501. The dtype_backends are still experimential.
  502. .. versionadded:: 2.0
  503. dtype : Type name or dict of columns
  504. Data type for data or columns. E.g. np.float64 or
  505. {‘a’: np.float64, ‘b’: np.int32, ‘c’: ‘Int64’}.
  506. The argument is ignored if a table is passed instead of a query.
  507. .. versionadded:: 2.0.0
  508. Returns
  509. -------
  510. DataFrame or Iterator[DataFrame]
  511. See Also
  512. --------
  513. read_sql_table : Read SQL database table into a DataFrame.
  514. read_sql_query : Read SQL query into a DataFrame.
  515. Examples
  516. --------
  517. Read data from SQL via either a SQL query or a SQL tablename.
  518. When using a SQLite database only SQL queries are accepted,
  519. providing only the SQL tablename will result in an error.
  520. >>> from sqlite3 import connect
  521. >>> conn = connect(':memory:')
  522. >>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']],
  523. ... columns=['int_column', 'date_column'])
  524. >>> df.to_sql('test_data', conn)
  525. 2
  526. >>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn)
  527. int_column date_column
  528. 0 0 10/11/12
  529. 1 1 12/11/10
  530. >>> pd.read_sql('test_data', 'postgres:///db_name') # doctest:+SKIP
  531. Apply date parsing to columns through the ``parse_dates`` argument
  532. The ``parse_dates`` argument calls ``pd.to_datetime`` on the provided columns.
  533. Custom argument values for applying ``pd.to_datetime`` on a column are specified
  534. via a dictionary format:
  535. >>> pd.read_sql('SELECT int_column, date_column FROM test_data',
  536. ... conn,
  537. ... parse_dates={"date_column": {"format": "%d/%m/%y"}})
  538. int_column date_column
  539. 0 0 2012-11-10
  540. 1 1 2010-11-12
  541. """
  542. check_dtype_backend(dtype_backend)
  543. if dtype_backend is lib.no_default:
  544. dtype_backend = "numpy" # type: ignore[assignment]
  545. with pandasSQL_builder(con) as pandas_sql:
  546. if isinstance(pandas_sql, SQLiteDatabase):
  547. return pandas_sql.read_query(
  548. sql,
  549. index_col=index_col,
  550. params=params,
  551. coerce_float=coerce_float,
  552. parse_dates=parse_dates,
  553. chunksize=chunksize,
  554. dtype_backend=dtype_backend, # type: ignore[arg-type]
  555. dtype=dtype,
  556. )
  557. try:
  558. _is_table_name = pandas_sql.has_table(sql)
  559. except Exception:
  560. # using generic exception to catch errors from sql drivers (GH24988)
  561. _is_table_name = False
  562. if _is_table_name:
  563. return pandas_sql.read_table(
  564. sql,
  565. index_col=index_col,
  566. coerce_float=coerce_float,
  567. parse_dates=parse_dates,
  568. columns=columns,
  569. chunksize=chunksize,
  570. dtype_backend=dtype_backend,
  571. )
  572. else:
  573. return pandas_sql.read_query(
  574. sql,
  575. index_col=index_col,
  576. params=params,
  577. coerce_float=coerce_float,
  578. parse_dates=parse_dates,
  579. chunksize=chunksize,
  580. dtype_backend=dtype_backend,
  581. dtype=dtype,
  582. )
  583. def to_sql(
  584. frame,
  585. name: str,
  586. con,
  587. schema: str | None = None,
  588. if_exists: Literal["fail", "replace", "append"] = "fail",
  589. index: bool = True,
  590. index_label: IndexLabel = None,
  591. chunksize: int | None = None,
  592. dtype: DtypeArg | None = None,
  593. method: str | None = None,
  594. engine: str = "auto",
  595. **engine_kwargs,
  596. ) -> int | None:
  597. """
  598. Write records stored in a DataFrame to a SQL database.
  599. Parameters
  600. ----------
  601. frame : DataFrame, Series
  602. name : str
  603. Name of SQL table.
  604. con : SQLAlchemy connectable(engine/connection) or database string URI
  605. or sqlite3 DBAPI2 connection
  606. Using SQLAlchemy makes it possible to use any DB supported by that
  607. library.
  608. If a DBAPI2 object, only sqlite3 is supported.
  609. schema : str, optional
  610. Name of SQL schema in database to write to (if database flavor
  611. supports this). If None, use default schema (default).
  612. if_exists : {'fail', 'replace', 'append'}, default 'fail'
  613. - fail: If table exists, do nothing.
  614. - replace: If table exists, drop it, recreate it, and insert data.
  615. - append: If table exists, insert data. Create if does not exist.
  616. index : bool, default True
  617. Write DataFrame index as a column.
  618. index_label : str or sequence, optional
  619. Column label for index column(s). If None is given (default) and
  620. `index` is True, then the index names are used.
  621. A sequence should be given if the DataFrame uses MultiIndex.
  622. chunksize : int, optional
  623. Specify the number of rows in each batch to be written at a time.
  624. By default, all rows will be written at once.
  625. dtype : dict or scalar, optional
  626. Specifying the datatype for columns. If a dictionary is used, the
  627. keys should be the column names and the values should be the
  628. SQLAlchemy types or strings for the sqlite3 fallback mode. If a
  629. scalar is provided, it will be applied to all columns.
  630. method : {None, 'multi', callable}, optional
  631. Controls the SQL insertion clause used:
  632. - None : Uses standard SQL ``INSERT`` clause (one per row).
  633. - ``'multi'``: Pass multiple values in a single ``INSERT`` clause.
  634. - callable with signature ``(pd_table, conn, keys, data_iter) -> int | None``.
  635. Details and a sample callable implementation can be found in the
  636. section :ref:`insert method <io.sql.method>`.
  637. engine : {'auto', 'sqlalchemy'}, default 'auto'
  638. SQL engine library to use. If 'auto', then the option
  639. ``io.sql.engine`` is used. The default ``io.sql.engine``
  640. behavior is 'sqlalchemy'
  641. .. versionadded:: 1.3.0
  642. **engine_kwargs
  643. Any additional kwargs are passed to the engine.
  644. Returns
  645. -------
  646. None or int
  647. Number of rows affected by to_sql. None is returned if the callable
  648. passed into ``method`` does not return an integer number of rows.
  649. .. versionadded:: 1.4.0
  650. Notes
  651. -----
  652. The returned rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor``
  653. or SQLAlchemy connectable. The returned value may not reflect the exact number of written
  654. rows as stipulated in the
  655. `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or
  656. `SQLAlchemy <https://docs.sqlalchemy.org/en/14/core/connections.html#sqlalchemy.engine.BaseCursorResult.rowcount>`__
  657. """ # noqa:E501
  658. if if_exists not in ("fail", "replace", "append"):
  659. raise ValueError(f"'{if_exists}' is not valid for if_exists")
  660. if isinstance(frame, Series):
  661. frame = frame.to_frame()
  662. elif not isinstance(frame, DataFrame):
  663. raise NotImplementedError(
  664. "'frame' argument should be either a Series or a DataFrame"
  665. )
  666. with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql:
  667. return pandas_sql.to_sql(
  668. frame,
  669. name,
  670. if_exists=if_exists,
  671. index=index,
  672. index_label=index_label,
  673. schema=schema,
  674. chunksize=chunksize,
  675. dtype=dtype,
  676. method=method,
  677. engine=engine,
  678. **engine_kwargs,
  679. )
  680. def has_table(table_name: str, con, schema: str | None = None) -> bool:
  681. """
  682. Check if DataBase has named table.
  683. Parameters
  684. ----------
  685. table_name: string
  686. Name of SQL table.
  687. con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
  688. Using SQLAlchemy makes it possible to use any DB supported by that
  689. library.
  690. If a DBAPI2 object, only sqlite3 is supported.
  691. schema : string, default None
  692. Name of SQL schema in database to write to (if database flavor supports
  693. this). If None, use default schema (default).
  694. Returns
  695. -------
  696. boolean
  697. """
  698. with pandasSQL_builder(con, schema=schema) as pandas_sql:
  699. return pandas_sql.has_table(table_name)
  700. table_exists = has_table
  701. def pandasSQL_builder(
  702. con,
  703. schema: str | None = None,
  704. need_transaction: bool = False,
  705. ) -> PandasSQL:
  706. """
  707. Convenience function to return the correct PandasSQL subclass based on the
  708. provided parameters. Also creates a sqlalchemy connection and transaction
  709. if necessary.
  710. """
  711. import sqlite3
  712. if isinstance(con, sqlite3.Connection) or con is None:
  713. return SQLiteDatabase(con)
  714. sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore")
  715. if isinstance(con, str) and sqlalchemy is None:
  716. raise ImportError("Using URI string without sqlalchemy installed.")
  717. if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Connectable)):
  718. return SQLDatabase(con, schema, need_transaction)
  719. warnings.warn(
  720. "pandas only supports SQLAlchemy connectable (engine/connection) or "
  721. "database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 "
  722. "objects are not tested. Please consider using SQLAlchemy.",
  723. UserWarning,
  724. stacklevel=find_stack_level(),
  725. )
  726. return SQLiteDatabase(con)
class SQLTable(PandasObject):
    """
    For mapping Pandas tables to SQL tables.

    Uses fact that table is reflected by SQLAlchemy to
    do better type conversions.
    Also holds various flags needed to avoid having to
    pass them between functions all the time.
    """

    # TODO: support for multiIndex

    def __init__(
        self,
        name: str,
        pandas_sql_engine,
        frame=None,
        index: bool | str | list[str] | None = True,
        if_exists: Literal["fail", "replace", "append"] = "fail",
        prefix: str = "pandas",
        index_label=None,
        schema=None,
        keys=None,
        dtype: DtypeArg | None = None,
    ) -> None:
        self.name = name
        self.pd_sql = pandas_sql_engine
        self.prefix = prefix
        self.frame = frame
        # Resolve index/index_label into a list of column labels (or None).
        self.index = self._index_name(index, index_label)
        self.schema = schema
        self.if_exists = if_exists
        self.keys = keys
        self.dtype = dtype

        if frame is not None:
            # We want to initialize based on a dataframe
            self.table = self._create_table_setup()
        else:
            # no data provided, read-only mode
            self.table = self.pd_sql.get_table(self.name, self.schema)

        if self.table is None:
            raise ValueError(f"Could not init table '{name}'")

    def exists(self):
        """Return whether a table with this name/schema exists in the database."""
        return self.pd_sql.has_table(self.name, self.schema)

    def sql_schema(self) -> str:
        """Return the CREATE TABLE statement for this table, rendered as a string."""
        from sqlalchemy.schema import CreateTable

        return str(CreateTable(self.table).compile(self.pd_sql.con))

    def _execute_create(self) -> None:
        # Inserting table into database, add to MetaData object
        self.table = self.table.to_metadata(self.pd_sql.meta)
        with self.pd_sql.run_transaction():
            self.table.create(bind=self.pd_sql.con)

    def create(self) -> None:
        """Create the table in the database, honoring the ``if_exists`` policy."""
        if self.exists():
            if self.if_exists == "fail":
                raise ValueError(f"Table '{self.name}' already exists.")
            if self.if_exists == "replace":
                self.pd_sql.drop_table(self.name, self.schema)
                self._execute_create()
            elif self.if_exists == "append":
                # Keep the existing table; rows are added later by insert().
                pass
            else:
                raise ValueError(f"'{self.if_exists}' is not valid for if_exists")
        else:
            self._execute_create()

    def _execute_insert(self, conn, keys: list[str], data_iter) -> int:
        """
        Execute SQL statement inserting data

        Parameters
        ----------
        conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
        keys : list of str
            Column names
        data_iter : generator of list
            Each item contains a list of values to be inserted

        Returns
        -------
        int
            The DBAPI ``rowcount`` reported for the executed INSERT.
        """
        data = [dict(zip(keys, row)) for row in data_iter]
        result = conn.execute(self.table.insert(), data)
        return result.rowcount

    def _execute_insert_multi(self, conn, keys: list[str], data_iter) -> int:
        """
        Alternative to _execute_insert for DBs support multivalue INSERT.

        Note: multi-value insert is usually faster for analytics DBs
        and tables containing a few columns
        but performance degrades quickly with increase of columns.
        """
        from sqlalchemy import insert

        data = [dict(zip(keys, row)) for row in data_iter]
        # Single INSERT ... VALUES (...), (...), ... statement.
        stmt = insert(self.table).values(data)
        result = conn.execute(stmt)
        return result.rowcount

    def insert_data(self) -> tuple[list[str], list[np.ndarray]]:
        """
        Convert ``self.frame`` into insertable form.

        Returns
        -------
        tuple
            (column names as strings, one object-dtype ndarray per column with
            NA values replaced by None).

        Raises
        ------
        ValueError
            If resetting the index produces duplicate column names.
        """
        if self.index is not None:
            # Materialize the index as regular columns so it is written too.
            temp = self.frame.copy()
            temp.index.names = self.index
            try:
                temp.reset_index(inplace=True)
            except ValueError as err:
                raise ValueError(f"duplicate name in index/columns: {err}") from err
        else:
            temp = self.frame

        column_names = list(map(str, temp.columns))
        ncols = len(column_names)
        # this just pre-allocates the list: None's will be replaced with ndarrays
        # error: List item 0 has incompatible type "None"; expected "ndarray"
        data_list: list[np.ndarray] = [None] * ncols  # type: ignore[list-item]

        for i, (_, ser) in enumerate(temp.items()):
            if ser.dtype.kind == "M":
                # datetime64 -> array of python datetime objects
                d = ser.dt.to_pydatetime()
            elif ser.dtype.kind == "m":
                vals = ser._values
                if isinstance(vals, ArrowExtensionArray):
                    vals = vals.to_numpy(dtype=np.dtype("m8[ns]"))
                # store as integers, see GH#6921, GH#7076
                d = vals.view("i8").astype(object)
            else:
                d = ser._values.astype(object)

            assert isinstance(d, np.ndarray), type(d)

            if ser._can_hold_na:
                # Note: this will miss timedeltas since they are converted to int
                mask = isna(d)
                d[mask] = None

            data_list[i] = d

        return column_names, data_list

    def insert(
        self, chunksize: int | None = None, method: str | None = None
    ) -> int | None:
        """
        Insert the frame's rows into the table, optionally in chunks.

        Returns the summed rowcount when each insert call reports an integer,
        otherwise None (e.g. a callable ``method`` that returns None).
        """
        # set insert method
        if method is None:
            exec_insert = self._execute_insert
        elif method == "multi":
            exec_insert = self._execute_insert_multi
        elif callable(method):
            # User callable receives this SQLTable as its first argument.
            exec_insert = partial(method, self)
        else:
            raise ValueError(f"Invalid parameter `method`: {method}")

        keys, data_list = self.insert_data()

        nrows = len(self.frame)

        if nrows == 0:
            return 0

        if chunksize is None:
            chunksize = nrows
        elif chunksize == 0:
            raise ValueError("chunksize argument should be non-zero")

        # +1 so a trailing partial chunk is emitted; an empty final chunk is
        # skipped by the start_i >= end_i guard below.
        chunks = (nrows // chunksize) + 1
        total_inserted = None
        with self.pd_sql.run_transaction() as conn:
            for i in range(chunks):
                start_i = i * chunksize
                end_i = min((i + 1) * chunksize, nrows)
                if start_i >= end_i:
                    break

                chunk_iter = zip(*(arr[start_i:end_i] for arr in data_list))
                num_inserted = exec_insert(conn, keys, chunk_iter)
                # GH 46891
                if is_integer(num_inserted):
                    if total_inserted is None:
                        total_inserted = num_inserted
                    else:
                        total_inserted += num_inserted
        return total_inserted

    def _query_iterator(
        self,
        result,
        exit_stack: ExitStack,
        chunksize: int | None,
        columns,
        coerce_float: bool = True,
        parse_dates=None,
        dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
    ):
        """Return generator through chunked result set."""
        has_read_data = False
        # exit_stack owns the connection/transaction cleanup; entering it here
        # guarantees cleanup runs when the generator is exhausted or closed.
        with exit_stack:
            while True:
                data = result.fetchmany(chunksize)
                if not data:
                    if not has_read_data:
                        # Always yield at least one (possibly empty) frame.
                        yield DataFrame.from_records(
                            [], columns=columns, coerce_float=coerce_float
                        )
                    break

                has_read_data = True
                self.frame = _convert_arrays_to_dataframe(
                    data, columns, coerce_float, dtype_backend
                )
                self._harmonize_columns(
                    parse_dates=parse_dates, dtype_backend=dtype_backend
                )

                if self.index is not None:
                    self.frame.set_index(self.index, inplace=True)

                yield self.frame

    def read(
        self,
        exit_stack: ExitStack,
        coerce_float: bool = True,
        parse_dates=None,
        columns=None,
        chunksize=None,
        dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
    ) -> DataFrame | Iterator[DataFrame]:
        """
        Read the table into a DataFrame, or into an iterator of DataFrames
        when ``chunksize`` is given.
        """
        from sqlalchemy import select

        if columns is not None and len(columns) > 0:
            cols = [self.table.c[n] for n in columns]
            if self.index is not None:
                # Prepend index columns (in order) so they survive the
                # explicit column selection.
                for idx in self.index[::-1]:
                    cols.insert(0, self.table.c[idx])
            sql_select = select(*cols)
        else:
            sql_select = select(self.table)
        result = self.pd_sql.execute(sql_select)
        column_names = result.keys()

        if chunksize is not None:
            return self._query_iterator(
                result,
                exit_stack,
                chunksize,
                column_names,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
                dtype_backend=dtype_backend,
            )
        else:
            data = result.fetchall()
            self.frame = _convert_arrays_to_dataframe(
                data, column_names, coerce_float, dtype_backend
            )
            self._harmonize_columns(
                parse_dates=parse_dates, dtype_backend=dtype_backend
            )

            if self.index is not None:
                self.frame.set_index(self.index, inplace=True)

            return self.frame

    def _index_name(self, index, index_label):
        """Normalize the index/index_label arguments to a list of labels or None."""
        # for writing: index=True to include index in sql table
        if index is True:
            nlevels = self.frame.index.nlevels
            # if index_label is specified, set this as index name(s)
            if index_label is not None:
                if not isinstance(index_label, list):
                    index_label = [index_label]
                if len(index_label) != nlevels:
                    raise ValueError(
                        "Length of 'index_label' should match number of "
                        f"levels, which is {nlevels}"
                    )
                return index_label
            # return the used column labels for the index columns
            if (
                nlevels == 1
                and "index" not in self.frame.columns
                and self.frame.index.name is None
            ):
                return ["index"]
            else:
                return com.fill_missing_names(self.frame.index.names)

        # for reading: index=(list of) string to specify column to set as index
        elif isinstance(index, str):
            return [index]
        elif isinstance(index, list):
            return index
        else:
            return None

    def _get_column_names_and_types(self, dtype_mapper):
        """
        Return ``(name, mapped_type, is_index)`` triples for the index levels
        followed by the frame's columns, mapping each through ``dtype_mapper``.
        """
        column_names_and_types = []
        if self.index is not None:
            for i, idx_label in enumerate(self.index):
                idx_type = dtype_mapper(self.frame.index._get_level_values(i))
                column_names_and_types.append((str(idx_label), idx_type, True))

        column_names_and_types += [
            (str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False)
            for i in range(len(self.frame.columns))
        ]

        return column_names_and_types

    def _create_table_setup(self):
        """Build a SQLAlchemy ``Table`` object matching the frame's schema."""
        from sqlalchemy import (
            Column,
            PrimaryKeyConstraint,
            Table,
        )
        from sqlalchemy.schema import MetaData

        column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type)

        columns: list[Any] = [
            Column(name, typ, index=is_index)
            for name, typ, is_index in column_names_and_types
        ]

        if self.keys is not None:
            if not is_list_like(self.keys):
                keys = [self.keys]
            else:
                keys = self.keys
            pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk")
            columns.append(pkc)

        schema = self.schema or self.pd_sql.meta.schema

        # At this point, attach to new metadata, only attach to self.meta
        # once table is created.
        meta = MetaData()
        return Table(self.name, meta, *columns, schema=schema)

    def _harmonize_columns(
        self,
        parse_dates=None,
        dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
    ) -> None:
        """
        Make the DataFrame's column types align with the SQL table
        column types.

        Need to work around limited NA value support. Floats are always
        fine, ints must always be floats if there are Null values.
        Booleans are hard because converting bool column with None replaces
        all Nones with false. Therefore only convert bool if there are no
        NA values.
        Datetimes should already be converted to np.datetime64 if supported,
        but here we also force conversion if required.
        """
        parse_dates = _process_parse_dates_argument(parse_dates)

        for sql_col in self.table.columns:
            col_name = sql_col.name
            try:
                df_col = self.frame[col_name]

                # Handle date parsing upfront; don't try to convert columns
                # twice
                if col_name in parse_dates:
                    try:
                        fmt = parse_dates[col_name]
                    except TypeError:
                        # parse_dates is a list, not a dict: no per-column format.
                        fmt = None
                    self.frame[col_name] = _handle_date_column(df_col, format=fmt)
                    continue

                # the type the dataframe column should have
                col_type = self._get_dtype(sql_col.type)

                if (
                    col_type is datetime
                    or col_type is date
                    or col_type is DatetimeTZDtype
                ):
                    # Convert tz-aware Datetime SQL columns to UTC
                    utc = col_type is DatetimeTZDtype
                    self.frame[col_name] = _handle_date_column(df_col, utc=utc)
                elif dtype_backend == "numpy" and col_type is float:
                    # floats support NA, can always convert!
                    self.frame[col_name] = df_col.astype(col_type, copy=False)

                elif dtype_backend == "numpy" and len(df_col) == df_col.count():
                    # No NA values, can convert ints and bools
                    if col_type is np.dtype("int64") or col_type is bool:
                        self.frame[col_name] = df_col.astype(col_type, copy=False)
            except KeyError:
                pass  # this column not in results

    def _sqlalchemy_type(self, col):
        """Map a pandas column/Index to an appropriate SQLAlchemy type."""
        # A user-specified dtype mapping takes precedence over inference.
        dtype: DtypeArg = self.dtype or {}
        if is_dict_like(dtype):
            dtype = cast(dict, dtype)
            if col.name in dtype:
                return dtype[col.name]

        # Infer type of column, while ignoring missing values.
        # Needed for inserting typed data containing NULLs, GH 8778.
        col_type = lib.infer_dtype(col, skipna=True)

        from sqlalchemy.types import (
            TIMESTAMP,
            BigInteger,
            Boolean,
            Date,
            DateTime,
            Float,
            Integer,
            SmallInteger,
            Text,
            Time,
        )

        if col_type in ("datetime64", "datetime"):
            # GH 9086: TIMESTAMP is the suggested type if the column contains
            # timezone information
            try:
                if col.dt.tz is not None:
                    return TIMESTAMP(timezone=True)
            except AttributeError:
                # The column is actually a DatetimeIndex
                # GH 26761 or an Index with date-like data e.g. 9999-01-01
                if getattr(col, "tz", None) is not None:
                    return TIMESTAMP(timezone=True)
            return DateTime
        if col_type == "timedelta64":
            warnings.warn(
                "the 'timedelta' type is not supported, and will be "
                "written as integer values (ns frequency) to the database.",
                UserWarning,
                stacklevel=find_stack_level(),
            )
            return BigInteger
        elif col_type == "floating":
            if col.dtype == "float32":
                return Float(precision=23)
            else:
                return Float(precision=53)
        elif col_type == "integer":
            # GH35076 Map pandas integer to optimal SQLAlchemy integer type
            if col.dtype.name.lower() in ("int8", "uint8", "int16"):
                return SmallInteger
            elif col.dtype.name.lower() in ("uint16", "int32"):
                return Integer
            elif col.dtype.name.lower() == "uint64":
                raise ValueError("Unsigned 64 bit integer datatype is not supported")
            else:
                return BigInteger
        elif col_type == "boolean":
            return Boolean
        elif col_type == "date":
            return Date
        elif col_type == "time":
            return Time
        elif col_type == "complex":
            raise ValueError("Complex datatypes not supported")

        # Fallback for strings and anything unrecognized.
        return Text

    def _get_dtype(self, sqltype):
        """Map a SQLAlchemy column type to the dtype the DataFrame column should get."""
        from sqlalchemy.types import (
            TIMESTAMP,
            Boolean,
            Date,
            DateTime,
            Float,
            Integer,
        )

        if isinstance(sqltype, Float):
            return float
        elif isinstance(sqltype, Integer):
            # TODO: Refine integer size.
            return np.dtype("int64")
        elif isinstance(sqltype, TIMESTAMP):
            # we have a timezone capable type
            if not sqltype.timezone:
                return datetime
            return DatetimeTZDtype
        elif isinstance(sqltype, DateTime):
            # Caution: np.datetime64 is also a subclass of np.number.
            return datetime
        elif isinstance(sqltype, Date):
            return date
        elif isinstance(sqltype, Boolean):
            return bool
        return object
class PandasSQL(PandasObject, ABC):
    """
    Subclasses Should define read_query and to_sql.
    """

    def __enter__(self):
        # Context-manager protocol; the base class needs no setup or teardown.
        return self

    def __exit__(self, *args) -> None:
        pass

    def read_table(
        self,
        table_name: str,
        index_col: str | list[str] | None = None,
        coerce_float: bool = True,
        parse_dates=None,
        columns=None,
        schema: str | None = None,
        chunksize: int | None = None,
        dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
    ) -> DataFrame | Iterator[DataFrame]:
        """Read a database table into a DataFrame; optional for subclasses."""
        # Not abstract: only backends that can reflect tables implement this.
        raise NotImplementedError

    @abstractmethod
    def read_query(
        self,
        sql: str,
        index_col: str | list[str] | None = None,
        coerce_float: bool = True,
        parse_dates=None,
        params=None,
        chunksize: int | None = None,
        dtype: DtypeArg | None = None,
        dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
    ) -> DataFrame | Iterator[DataFrame]:
        """Run a SQL query and return the result as a DataFrame (or iterator)."""
        pass

    @abstractmethod
    def to_sql(
        self,
        frame,
        name,
        if_exists: Literal["fail", "replace", "append"] = "fail",
        index: bool = True,
        index_label=None,
        schema=None,
        chunksize=None,
        dtype: DtypeArg | None = None,
        method=None,
        engine: str = "auto",
        **engine_kwargs,
    ) -> int | None:
        """Write ``frame`` to table ``name``; return affected row count or None."""
        pass

    @abstractmethod
    def execute(self, sql: str | Select | TextClause, params=None):
        """Execute a raw SQL statement against the underlying connection."""
        pass

    @abstractmethod
    def has_table(self, name: str, schema: str | None = None) -> bool:
        """Return whether table ``name`` exists in the database."""
        pass

    @abstractmethod
    def _create_sql_schema(
        self,
        frame: DataFrame,
        table_name: str,
        keys: list[str] | None = None,
        dtype: DtypeArg | None = None,
        schema: str | None = None,
    ):
        """Return the CREATE TABLE statement that would be used for ``frame``."""
        pass
class BaseEngine:
    """Interface for pluggable SQL insertion engines (see ``get_engine``)."""

    def insert_records(
        self,
        table: SQLTable,
        con,
        frame,
        name,
        index: bool | str | list[str] | None = True,
        schema=None,
        chunksize=None,
        method=None,
        **engine_kwargs,
    ) -> int | None:
        """
        Inserts data into already-prepared table
        """
        # Subclasses must override; AbstractMethodError mirrors pandas style.
        raise AbstractMethodError(self)
class SQLAlchemyEngine(BaseEngine):
    """Insertion engine backed by SQLAlchemy; raises ImportError if missing."""

    def __init__(self) -> None:
        # Fails fast with a helpful message when sqlalchemy is absent.
        import_optional_dependency(
            "sqlalchemy", extra="sqlalchemy is required for SQL support."
        )

    def insert_records(
        self,
        table: SQLTable,
        con,
        frame,
        name,
        index: bool | str | list[str] | None = True,
        schema=None,
        chunksize=None,
        method=None,
        **engine_kwargs,
    ) -> int | None:
        """Insert ``table``'s rows via SQLTable.insert; returns affected rows or None."""
        from sqlalchemy import exc

        try:
            return table.insert(chunksize=chunksize, method=method)
        except exc.StatementError as err:
            # GH34431
            # https://stackoverflow.com/a/67358288/6067848
            # Matches the MySQL "Unknown column 'inf'" error; the (?#...)
            # inline-comment trick lets the raw string span two lines.
            msg = r"""(\(1054, "Unknown column 'inf(e0)?' in 'field list'"\))(?#
            )|inf can not be used with MySQL"""
            err_text = str(err.orig)
            if re.search(msg, err_text):
                # Re-raise as a clearer, driver-independent error.
                raise ValueError("inf cannot be used with MySQL") from err
            raise err
  1274. def get_engine(engine: str) -> BaseEngine:
  1275. """return our implementation"""
  1276. if engine == "auto":
  1277. engine = get_option("io.sql.engine")
  1278. if engine == "auto":
  1279. # try engines in this order
  1280. engine_classes = [SQLAlchemyEngine]
  1281. error_msgs = ""
  1282. for engine_class in engine_classes:
  1283. try:
  1284. return engine_class()
  1285. except ImportError as err:
  1286. error_msgs += "\n - " + str(err)
  1287. raise ImportError(
  1288. "Unable to find a usable engine; "
  1289. "tried using: 'sqlalchemy'.\n"
  1290. "A suitable version of "
  1291. "sqlalchemy is required for sql I/O "
  1292. "support.\n"
  1293. "Trying to import the above resulted in these errors:"
  1294. f"{error_msgs}"
  1295. )
  1296. if engine == "sqlalchemy":
  1297. return SQLAlchemyEngine()
  1298. raise ValueError("engine must be one of 'auto', 'sqlalchemy'")
  1299. class SQLDatabase(PandasSQL):
  1300. """
  1301. This class enables conversion between DataFrame and SQL databases
  1302. using SQLAlchemy to handle DataBase abstraction.
  1303. Parameters
  1304. ----------
  1305. con : SQLAlchemy Connectable or URI string.
  1306. Connectable to connect with the database. Using SQLAlchemy makes it
  1307. possible to use any DB supported by that library.
  1308. schema : string, default None
  1309. Name of SQL schema in database to write to (if database flavor
  1310. supports this). If None, use default schema (default).
  1311. need_transaction : bool, default False
  1312. If True, SQLDatabase will create a transaction.
  1313. """
  1314. def __init__(
  1315. self, con, schema: str | None = None, need_transaction: bool = False
  1316. ) -> None:
  1317. from sqlalchemy import create_engine
  1318. from sqlalchemy.engine import Engine
  1319. from sqlalchemy.schema import MetaData
  1320. # self.exit_stack cleans up the Engine and Connection and commits the
  1321. # transaction if any of those objects was created below.
  1322. # Cleanup happens either in self.__exit__ or at the end of the iterator
  1323. # returned by read_sql when chunksize is not None.
  1324. self.exit_stack = ExitStack()
  1325. if isinstance(con, str):
  1326. con = create_engine(con)
  1327. self.exit_stack.callback(con.dispose)
  1328. if isinstance(con, Engine):
  1329. con = self.exit_stack.enter_context(con.connect())
  1330. if need_transaction and not con.in_transaction():
  1331. self.exit_stack.enter_context(con.begin())
  1332. self.con = con
  1333. self.meta = MetaData(schema=schema)
  1334. self.returns_generator = False
  1335. def __exit__(self, *args) -> None:
  1336. if not self.returns_generator:
  1337. self.exit_stack.close()
  1338. @contextmanager
  1339. def run_transaction(self):
  1340. if not self.con.in_transaction():
  1341. with self.con.begin():
  1342. yield self.con
  1343. else:
  1344. yield self.con
  1345. def execute(self, sql: str | Select | TextClause, params=None):
  1346. """Simple passthrough to SQLAlchemy connectable"""
  1347. args = [] if params is None else [params]
  1348. if isinstance(sql, str):
  1349. return self.con.exec_driver_sql(sql, *args)
  1350. return self.con.execute(sql, *args)
  1351. def read_table(
  1352. self,
  1353. table_name: str,
  1354. index_col: str | list[str] | None = None,
  1355. coerce_float: bool = True,
  1356. parse_dates=None,
  1357. columns=None,
  1358. schema: str | None = None,
  1359. chunksize: int | None = None,
  1360. dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
  1361. ) -> DataFrame | Iterator[DataFrame]:
  1362. """
  1363. Read SQL database table into a DataFrame.
  1364. Parameters
  1365. ----------
  1366. table_name : str
  1367. Name of SQL table in database.
  1368. index_col : string, optional, default: None
  1369. Column to set as index.
  1370. coerce_float : bool, default True
  1371. Attempts to convert values of non-string, non-numeric objects
  1372. (like decimal.Decimal) to floating point. This can result in
  1373. loss of precision.
  1374. parse_dates : list or dict, default: None
  1375. - List of column names to parse as dates.
  1376. - Dict of ``{column_name: format string}`` where format string is
  1377. strftime compatible in case of parsing string times, or is one of
  1378. (D, s, ns, ms, us) in case of parsing integer timestamps.
  1379. - Dict of ``{column_name: arg}``, where the arg corresponds
  1380. to the keyword arguments of :func:`pandas.to_datetime`.
  1381. Especially useful with databases without native Datetime support,
  1382. such as SQLite.
  1383. columns : list, default: None
  1384. List of column names to select from SQL table.
  1385. schema : string, default None
  1386. Name of SQL schema in database to query (if database flavor
  1387. supports this). If specified, this overwrites the default
  1388. schema of the SQL database object.
  1389. chunksize : int, default None
  1390. If specified, return an iterator where `chunksize` is the number
  1391. of rows to include in each chunk.
  1392. dtype_backend : {{"numpy_nullable", "pyarrow"}}, defaults to NumPy dtypes
  1393. Which dtype_backend to use, e.g. whether a DataFrame should have NumPy
  1394. arrays, nullable dtypes are used for all dtypes that have a nullable
  1395. implementation when "numpy_nullable" is set, pyarrow is used for all
  1396. dtypes if "pyarrow" is set.
  1397. The dtype_backends are still experimential.
  1398. .. versionadded:: 2.0
  1399. Returns
  1400. -------
  1401. DataFrame
  1402. See Also
  1403. --------
  1404. pandas.read_sql_table
  1405. SQLDatabase.read_query
  1406. """
  1407. self.meta.reflect(bind=self.con, only=[table_name])
  1408. table = SQLTable(table_name, self, index=index_col, schema=schema)
  1409. if chunksize is not None:
  1410. self.returns_generator = True
  1411. return table.read(
  1412. self.exit_stack,
  1413. coerce_float=coerce_float,
  1414. parse_dates=parse_dates,
  1415. columns=columns,
  1416. chunksize=chunksize,
  1417. dtype_backend=dtype_backend,
  1418. )
  1419. @staticmethod
  1420. def _query_iterator(
  1421. result,
  1422. exit_stack: ExitStack,
  1423. chunksize: int,
  1424. columns,
  1425. index_col=None,
  1426. coerce_float: bool = True,
  1427. parse_dates=None,
  1428. dtype: DtypeArg | None = None,
  1429. dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
  1430. ):
  1431. """Return generator through chunked result set"""
  1432. has_read_data = False
  1433. with exit_stack:
  1434. while True:
  1435. data = result.fetchmany(chunksize)
  1436. if not data:
  1437. if not has_read_data:
  1438. yield _wrap_result(
  1439. [],
  1440. columns,
  1441. index_col=index_col,
  1442. coerce_float=coerce_float,
  1443. parse_dates=parse_dates,
  1444. dtype=dtype,
  1445. dtype_backend=dtype_backend,
  1446. )
  1447. break
  1448. has_read_data = True
  1449. yield _wrap_result(
  1450. data,
  1451. columns,
  1452. index_col=index_col,
  1453. coerce_float=coerce_float,
  1454. parse_dates=parse_dates,
  1455. dtype=dtype,
  1456. dtype_backend=dtype_backend,
  1457. )
  1458. def read_query(
  1459. self,
  1460. sql: str,
  1461. index_col: str | list[str] | None = None,
  1462. coerce_float: bool = True,
  1463. parse_dates=None,
  1464. params=None,
  1465. chunksize: int | None = None,
  1466. dtype: DtypeArg | None = None,
  1467. dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
  1468. ) -> DataFrame | Iterator[DataFrame]:
  1469. """
  1470. Read SQL query into a DataFrame.
  1471. Parameters
  1472. ----------
  1473. sql : str
  1474. SQL query to be executed.
  1475. index_col : string, optional, default: None
  1476. Column name to use as index for the returned DataFrame object.
  1477. coerce_float : bool, default True
  1478. Attempt to convert values of non-string, non-numeric objects (like
  1479. decimal.Decimal) to floating point, useful for SQL result sets.
  1480. params : list, tuple or dict, optional, default: None
  1481. List of parameters to pass to execute method. The syntax used
  1482. to pass parameters is database driver dependent. Check your
  1483. database driver documentation for which of the five syntax styles,
  1484. described in PEP 249's paramstyle, is supported.
  1485. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
  1486. parse_dates : list or dict, default: None
  1487. - List of column names to parse as dates.
  1488. - Dict of ``{column_name: format string}`` where format string is
  1489. strftime compatible in case of parsing string times, or is one of
  1490. (D, s, ns, ms, us) in case of parsing integer timestamps.
  1491. - Dict of ``{column_name: arg dict}``, where the arg dict
  1492. corresponds to the keyword arguments of
  1493. :func:`pandas.to_datetime` Especially useful with databases
  1494. without native Datetime support, such as SQLite.
  1495. chunksize : int, default None
  1496. If specified, return an iterator where `chunksize` is the number
  1497. of rows to include in each chunk.
  1498. dtype : Type name or dict of columns
  1499. Data type for data or columns. E.g. np.float64 or
  1500. {‘a’: np.float64, ‘b’: np.int32, ‘c’: ‘Int64’}
  1501. .. versionadded:: 1.3.0
  1502. Returns
  1503. -------
  1504. DataFrame
  1505. See Also
  1506. --------
  1507. read_sql_table : Read SQL database table into a DataFrame.
  1508. read_sql
  1509. """
  1510. result = self.execute(sql, params)
  1511. columns = result.keys()
  1512. if chunksize is not None:
  1513. self.returns_generator = True
  1514. return self._query_iterator(
  1515. result,
  1516. self.exit_stack,
  1517. chunksize,
  1518. columns,
  1519. index_col=index_col,
  1520. coerce_float=coerce_float,
  1521. parse_dates=parse_dates,
  1522. dtype=dtype,
  1523. dtype_backend=dtype_backend,
  1524. )
  1525. else:
  1526. data = result.fetchall()
  1527. frame = _wrap_result(
  1528. data,
  1529. columns,
  1530. index_col=index_col,
  1531. coerce_float=coerce_float,
  1532. parse_dates=parse_dates,
  1533. dtype=dtype,
  1534. dtype_backend=dtype_backend,
  1535. )
  1536. return frame
  1537. read_sql = read_query
  1538. def prep_table(
  1539. self,
  1540. frame,
  1541. name,
  1542. if_exists: Literal["fail", "replace", "append"] = "fail",
  1543. index: bool | str | list[str] | None = True,
  1544. index_label=None,
  1545. schema=None,
  1546. dtype: DtypeArg | None = None,
  1547. ) -> SQLTable:
  1548. """
  1549. Prepares table in the database for data insertion. Creates it if needed, etc.
  1550. """
  1551. if dtype:
  1552. if not is_dict_like(dtype):
  1553. # error: Value expression in dictionary comprehension has incompatible
  1554. # type "Union[ExtensionDtype, str, dtype[Any], Type[object],
  1555. # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
  1556. # Type[str], Type[float], Type[int], Type[complex], Type[bool],
  1557. # Type[object]]]]"; expected type "Union[ExtensionDtype, str,
  1558. # dtype[Any], Type[object]]"
  1559. dtype = {col_name: dtype for col_name in frame} # type: ignore[misc]
  1560. else:
  1561. dtype = cast(dict, dtype)
  1562. from sqlalchemy.types import TypeEngine
  1563. for col, my_type in dtype.items():
  1564. if isinstance(my_type, type) and issubclass(my_type, TypeEngine):
  1565. pass
  1566. elif isinstance(my_type, TypeEngine):
  1567. pass
  1568. else:
  1569. raise ValueError(f"The type of {col} is not a SQLAlchemy type")
  1570. table = SQLTable(
  1571. name,
  1572. self,
  1573. frame=frame,
  1574. index=index,
  1575. if_exists=if_exists,
  1576. index_label=index_label,
  1577. schema=schema,
  1578. dtype=dtype,
  1579. )
  1580. table.create()
  1581. return table
  1582. def check_case_sensitive(
  1583. self,
  1584. name: str,
  1585. schema: str | None,
  1586. ) -> None:
  1587. """
  1588. Checks table name for issues with case-sensitivity.
  1589. Method is called after data is inserted.
  1590. """
  1591. if not name.isdigit() and not name.islower():
  1592. # check for potentially case sensitivity issues (GH7815)
  1593. # Only check when name is not a number and name is not lower case
  1594. from sqlalchemy import inspect as sqlalchemy_inspect
  1595. insp = sqlalchemy_inspect(self.con)
  1596. table_names = insp.get_table_names(schema=schema or self.meta.schema)
  1597. if name not in table_names:
  1598. msg = (
  1599. f"The provided table name '{name}' is not found exactly as "
  1600. "such in the database after writing the table, possibly "
  1601. "due to case sensitivity issues. Consider using lower "
  1602. "case table names."
  1603. )
  1604. warnings.warn(
  1605. msg,
  1606. UserWarning,
  1607. stacklevel=find_stack_level(),
  1608. )
  1609. def to_sql(
  1610. self,
  1611. frame,
  1612. name: str,
  1613. if_exists: Literal["fail", "replace", "append"] = "fail",
  1614. index: bool = True,
  1615. index_label=None,
  1616. schema: str | None = None,
  1617. chunksize=None,
  1618. dtype: DtypeArg | None = None,
  1619. method=None,
  1620. engine: str = "auto",
  1621. **engine_kwargs,
  1622. ) -> int | None:
  1623. """
  1624. Write records stored in a DataFrame to a SQL database.
  1625. Parameters
  1626. ----------
  1627. frame : DataFrame
  1628. name : string
  1629. Name of SQL table.
  1630. if_exists : {'fail', 'replace', 'append'}, default 'fail'
  1631. - fail: If table exists, do nothing.
  1632. - replace: If table exists, drop it, recreate it, and insert data.
  1633. - append: If table exists, insert data. Create if does not exist.
  1634. index : boolean, default True
  1635. Write DataFrame index as a column.
  1636. index_label : string or sequence, default None
  1637. Column label for index column(s). If None is given (default) and
  1638. `index` is True, then the index names are used.
  1639. A sequence should be given if the DataFrame uses MultiIndex.
  1640. schema : string, default None
  1641. Name of SQL schema in database to write to (if database flavor
  1642. supports this). If specified, this overwrites the default
  1643. schema of the SQLDatabase object.
  1644. chunksize : int, default None
  1645. If not None, then rows will be written in batches of this size at a
  1646. time. If None, all rows will be written at once.
  1647. dtype : single type or dict of column name to SQL type, default None
  1648. Optional specifying the datatype for columns. The SQL type should
  1649. be a SQLAlchemy type. If all columns are of the same type, one
  1650. single value can be used.
  1651. method : {None', 'multi', callable}, default None
  1652. Controls the SQL insertion clause used:
  1653. * None : Uses standard SQL ``INSERT`` clause (one per row).
  1654. * 'multi': Pass multiple values in a single ``INSERT`` clause.
  1655. * callable with signature ``(pd_table, conn, keys, data_iter)``.
  1656. Details and a sample callable implementation can be found in the
  1657. section :ref:`insert method <io.sql.method>`.
  1658. engine : {'auto', 'sqlalchemy'}, default 'auto'
  1659. SQL engine library to use. If 'auto', then the option
  1660. ``io.sql.engine`` is used. The default ``io.sql.engine``
  1661. behavior is 'sqlalchemy'
  1662. .. versionadded:: 1.3.0
  1663. **engine_kwargs
  1664. Any additional kwargs are passed to the engine.
  1665. """
  1666. sql_engine = get_engine(engine)
  1667. table = self.prep_table(
  1668. frame=frame,
  1669. name=name,
  1670. if_exists=if_exists,
  1671. index=index,
  1672. index_label=index_label,
  1673. schema=schema,
  1674. dtype=dtype,
  1675. )
  1676. total_inserted = sql_engine.insert_records(
  1677. table=table,
  1678. con=self.con,
  1679. frame=frame,
  1680. name=name,
  1681. index=index,
  1682. schema=schema,
  1683. chunksize=chunksize,
  1684. method=method,
  1685. **engine_kwargs,
  1686. )
  1687. self.check_case_sensitive(name=name, schema=schema)
  1688. return total_inserted
    @property
    def tables(self):
        # Mapping of tables currently reflected into this database's
        # SQLAlchemy MetaData object.
        return self.meta.tables
  1692. def has_table(self, name: str, schema: str | None = None) -> bool:
  1693. from sqlalchemy import inspect as sqlalchemy_inspect
  1694. insp = sqlalchemy_inspect(self.con)
  1695. return insp.has_table(name, schema or self.meta.schema)
  1696. def get_table(self, table_name: str, schema: str | None = None) -> Table:
  1697. from sqlalchemy import (
  1698. Numeric,
  1699. Table,
  1700. )
  1701. schema = schema or self.meta.schema
  1702. tbl = Table(table_name, self.meta, autoload_with=self.con, schema=schema)
  1703. for column in tbl.columns:
  1704. if isinstance(column.type, Numeric):
  1705. column.type.asdecimal = False
  1706. return tbl
  1707. def drop_table(self, table_name: str, schema: str | None = None) -> None:
  1708. schema = schema or self.meta.schema
  1709. if self.has_table(table_name, schema):
  1710. self.meta.reflect(bind=self.con, only=[table_name], schema=schema)
  1711. with self.run_transaction():
  1712. self.get_table(table_name, schema).drop(bind=self.con)
  1713. self.meta.clear()
  1714. def _create_sql_schema(
  1715. self,
  1716. frame: DataFrame,
  1717. table_name: str,
  1718. keys: list[str] | None = None,
  1719. dtype: DtypeArg | None = None,
  1720. schema: str | None = None,
  1721. ):
  1722. table = SQLTable(
  1723. table_name,
  1724. self,
  1725. frame=frame,
  1726. index=False,
  1727. keys=keys,
  1728. dtype=dtype,
  1729. schema=schema,
  1730. )
  1731. return str(table.sql_schema())
# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes

# Maps the dtype names produced by ``lib.infer_dtype`` (see
# ``SQLiteTable._sql_type_name``) to SQLite column type names.
_SQL_TYPES = {
    "string": "TEXT",
    "floating": "REAL",
    "integer": "INTEGER",
    "datetime": "TIMESTAMP",
    "date": "DATE",
    "time": "TIME",
    "boolean": "INTEGER",  # SQLite has no dedicated boolean type
}
  1744. def _get_unicode_name(name):
  1745. try:
  1746. uname = str(name).encode("utf-8", "strict").decode("utf-8")
  1747. except UnicodeError as err:
  1748. raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err
  1749. return uname
  1750. def _get_valid_sqlite_name(name):
  1751. # See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
  1752. # -for-sqlite-table-column-names-in-python
  1753. # Ensure the string can be encoded as UTF-8.
  1754. # Ensure the string does not include any NUL characters.
  1755. # Replace all " with "".
  1756. # Wrap the entire thing in double quotes.
  1757. uname = _get_unicode_name(name)
  1758. if not len(uname):
  1759. raise ValueError("Empty table or column name specified")
  1760. nul_index = uname.find("\x00")
  1761. if nul_index >= 0:
  1762. raise ValueError("SQLite identifier cannot contain NULs")
  1763. return '"' + uname.replace('"', '""') + '"'
class SQLiteTable(SQLTable):
    """
    Patch the SQLTable for fallback support.
    Instead of a table variable just use the Create Table statement.
    """

    def __init__(self, *args, **kwargs) -> None:
        # GH 8341
        # register an adapter callable for datetime.time object
        import sqlite3

        # this will transform time(12,34,56,789) into '12:34:56.000789'
        # (this is what sqlalchemy does)
        def _adapt_time(t) -> str:
            # This is faster than strftime
            return f"{t.hour:02d}:{t.minute:02d}:{t.second:02d}.{t.microsecond:06d}"

        sqlite3.register_adapter(time, _adapt_time)
        super().__init__(*args, **kwargs)

    def sql_schema(self) -> str:
        # self.table is a list of DDL statements here (not a SQLAlchemy
        # Table); join them into one script.
        return str(";\n".join(self.table))

    def _execute_create(self) -> None:
        # Run all DDL statements (CREATE TABLE + CREATE INDEX) in one
        # transaction.
        with self.pd_sql.run_transaction() as conn:
            for stmt in self.table:
                conn.execute(stmt)

    def insert_statement(self, *, num_rows: int) -> str:
        # Build a parameterized INSERT covering ``num_rows`` rows, with one
        # "?" placeholder per value.
        names = list(map(str, self.frame.columns))
        wld = "?"  # wildcard char
        escape = _get_valid_sqlite_name

        if self.index is not None:
            # Index level names are prepended so they come before the data
            # columns, preserving their original order.
            for idx in self.index[::-1]:
                names.insert(0, idx)

        bracketed_names = [escape(column) for column in names]
        col_names = ",".join(bracketed_names)

        row_wildcards = ",".join([wld] * len(names))
        wildcards = ",".join([f"({row_wildcards})" for _ in range(num_rows)])
        insert_statement = (
            f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}"
        )
        return insert_statement

    def _execute_insert(self, conn, keys, data_iter) -> int:
        # Row-at-a-time INSERT via executemany (method=None path).
        data_list = list(data_iter)
        conn.executemany(self.insert_statement(num_rows=1), data_list)
        return conn.rowcount

    def _execute_insert_multi(self, conn, keys, data_iter) -> int:
        # Single multi-row INSERT (method='multi' path): flatten all rows
        # into one parameter sequence.
        data_list = list(data_iter)
        flattened_data = [x for row in data_list for x in row]
        conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data)
        return conn.rowcount

    def _create_table_setup(self):
        """
        Return a list of SQL statements that creates a table reflecting the
        structure of a DataFrame. The first entry will be a CREATE TABLE
        statement while the rest will be CREATE INDEX statements.
        """
        column_names_and_types = self._get_column_names_and_types(self._sql_type_name)
        escape = _get_valid_sqlite_name

        create_tbl_stmts = [
            escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types
        ]

        if self.keys is not None and len(self.keys):
            if not is_list_like(self.keys):
                keys = [self.keys]
            else:
                keys = self.keys
            cnames_br = ", ".join([escape(c) for c in keys])
            create_tbl_stmts.append(
                f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})"
            )
        if self.schema:
            schema_name = self.schema + "."
        else:
            schema_name = ""
        create_stmts = [
            "CREATE TABLE "
            + schema_name
            + escape(self.name)
            + " (\n"
            + ",\n ".join(create_tbl_stmts)
            + "\n)"
        ]

        ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index]
        if len(ix_cols):
            cnames = "_".join(ix_cols)
            cnames_br = ",".join([escape(c) for c in ix_cols])
            create_stmts.append(
                "CREATE INDEX "
                + escape("ix_" + self.name + "_" + cnames)
                # NOTE(review): no space before "ON" — SQLite tolerates this
                # because the preceding index name is double-quoted, but
                # confirm before reusing this DDL for another backend.
                + "ON "
                + escape(self.name)
                + " ("
                + cnames_br
                + ")"
            )

        return create_stmts

    def _sql_type_name(self, col):
        # User-specified dtype (per column) wins over inference.
        dtype: DtypeArg = self.dtype or {}
        if is_dict_like(dtype):
            dtype = cast(dict, dtype)
            if col.name in dtype:
                return dtype[col.name]

        # Infer type of column, while ignoring missing values.
        # Needed for inserting typed data containing NULLs, GH 8778.
        col_type = lib.infer_dtype(col, skipna=True)

        if col_type == "timedelta64":
            warnings.warn(
                "the 'timedelta' type is not supported, and will be "
                "written as integer values (ns frequency) to the database.",
                UserWarning,
                stacklevel=find_stack_level(),
            )
            col_type = "integer"

        elif col_type == "datetime64":
            col_type = "datetime"

        elif col_type == "empty":
            col_type = "string"

        elif col_type == "complex":
            raise ValueError("Complex datatypes not supported")

        # Anything unrecognized falls back to TEXT.
        if col_type not in _SQL_TYPES:
            col_type = "string"

        return _SQL_TYPES[col_type]
  1882. class SQLiteDatabase(PandasSQL):
  1883. """
  1884. Version of SQLDatabase to support SQLite connections (fallback without
  1885. SQLAlchemy). This should only be used internally.
  1886. Parameters
  1887. ----------
  1888. con : sqlite connection object
  1889. """
  1890. def __init__(self, con) -> None:
  1891. self.con = con
  1892. @contextmanager
  1893. def run_transaction(self):
  1894. cur = self.con.cursor()
  1895. try:
  1896. yield cur
  1897. self.con.commit()
  1898. except Exception:
  1899. self.con.rollback()
  1900. raise
  1901. finally:
  1902. cur.close()
  1903. def execute(self, sql: str | Select | TextClause, params=None):
  1904. if not isinstance(sql, str):
  1905. raise TypeError("Query must be a string unless using sqlalchemy.")
  1906. args = [] if params is None else [params]
  1907. cur = self.con.cursor()
  1908. try:
  1909. cur.execute(sql, *args)
  1910. return cur
  1911. except Exception as exc:
  1912. try:
  1913. self.con.rollback()
  1914. except Exception as inner_exc: # pragma: no cover
  1915. ex = DatabaseError(
  1916. f"Execution failed on sql: {sql}\n{exc}\nunable to rollback"
  1917. )
  1918. raise ex from inner_exc
  1919. ex = DatabaseError(f"Execution failed on sql '{sql}': {exc}")
  1920. raise ex from exc
  1921. @staticmethod
  1922. def _query_iterator(
  1923. cursor,
  1924. chunksize: int,
  1925. columns,
  1926. index_col=None,
  1927. coerce_float: bool = True,
  1928. parse_dates=None,
  1929. dtype: DtypeArg | None = None,
  1930. dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
  1931. ):
  1932. """Return generator through chunked result set"""
  1933. has_read_data = False
  1934. while True:
  1935. data = cursor.fetchmany(chunksize)
  1936. if type(data) == tuple:
  1937. data = list(data)
  1938. if not data:
  1939. cursor.close()
  1940. if not has_read_data:
  1941. result = DataFrame.from_records(
  1942. [], columns=columns, coerce_float=coerce_float
  1943. )
  1944. if dtype:
  1945. result = result.astype(dtype)
  1946. yield result
  1947. break
  1948. has_read_data = True
  1949. yield _wrap_result(
  1950. data,
  1951. columns,
  1952. index_col=index_col,
  1953. coerce_float=coerce_float,
  1954. parse_dates=parse_dates,
  1955. dtype=dtype,
  1956. dtype_backend=dtype_backend,
  1957. )
  1958. def read_query(
  1959. self,
  1960. sql,
  1961. index_col=None,
  1962. coerce_float: bool = True,
  1963. parse_dates=None,
  1964. params=None,
  1965. chunksize: int | None = None,
  1966. dtype: DtypeArg | None = None,
  1967. dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
  1968. ) -> DataFrame | Iterator[DataFrame]:
  1969. cursor = self.execute(sql, params)
  1970. columns = [col_desc[0] for col_desc in cursor.description]
  1971. if chunksize is not None:
  1972. return self._query_iterator(
  1973. cursor,
  1974. chunksize,
  1975. columns,
  1976. index_col=index_col,
  1977. coerce_float=coerce_float,
  1978. parse_dates=parse_dates,
  1979. dtype=dtype,
  1980. dtype_backend=dtype_backend,
  1981. )
  1982. else:
  1983. data = self._fetchall_as_list(cursor)
  1984. cursor.close()
  1985. frame = _wrap_result(
  1986. data,
  1987. columns,
  1988. index_col=index_col,
  1989. coerce_float=coerce_float,
  1990. parse_dates=parse_dates,
  1991. dtype=dtype,
  1992. dtype_backend=dtype_backend,
  1993. )
  1994. return frame
  1995. def _fetchall_as_list(self, cur):
  1996. result = cur.fetchall()
  1997. if not isinstance(result, list):
  1998. result = list(result)
  1999. return result
  2000. def to_sql(
  2001. self,
  2002. frame,
  2003. name,
  2004. if_exists: str = "fail",
  2005. index: bool = True,
  2006. index_label=None,
  2007. schema=None,
  2008. chunksize=None,
  2009. dtype: DtypeArg | None = None,
  2010. method=None,
  2011. engine: str = "auto",
  2012. **engine_kwargs,
  2013. ) -> int | None:
  2014. """
  2015. Write records stored in a DataFrame to a SQL database.
  2016. Parameters
  2017. ----------
  2018. frame: DataFrame
  2019. name: string
  2020. Name of SQL table.
  2021. if_exists: {'fail', 'replace', 'append'}, default 'fail'
  2022. fail: If table exists, do nothing.
  2023. replace: If table exists, drop it, recreate it, and insert data.
  2024. append: If table exists, insert data. Create if it does not exist.
  2025. index : bool, default True
  2026. Write DataFrame index as a column
  2027. index_label : string or sequence, default None
  2028. Column label for index column(s). If None is given (default) and
  2029. `index` is True, then the index names are used.
  2030. A sequence should be given if the DataFrame uses MultiIndex.
  2031. schema : string, default None
  2032. Ignored parameter included for compatibility with SQLAlchemy
  2033. version of ``to_sql``.
  2034. chunksize : int, default None
  2035. If not None, then rows will be written in batches of this
  2036. size at a time. If None, all rows will be written at once.
  2037. dtype : single type or dict of column name to SQL type, default None
  2038. Optional specifying the datatype for columns. The SQL type should
  2039. be a string. If all columns are of the same type, one single value
  2040. can be used.
  2041. method : {None, 'multi', callable}, default None
  2042. Controls the SQL insertion clause used:
  2043. * None : Uses standard SQL ``INSERT`` clause (one per row).
  2044. * 'multi': Pass multiple values in a single ``INSERT`` clause.
  2045. * callable with signature ``(pd_table, conn, keys, data_iter)``.
  2046. Details and a sample callable implementation can be found in the
  2047. section :ref:`insert method <io.sql.method>`.
  2048. """
  2049. if dtype:
  2050. if not is_dict_like(dtype):
  2051. # error: Value expression in dictionary comprehension has incompatible
  2052. # type "Union[ExtensionDtype, str, dtype[Any], Type[object],
  2053. # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
  2054. # Type[str], Type[float], Type[int], Type[complex], Type[bool],
  2055. # Type[object]]]]"; expected type "Union[ExtensionDtype, str,
  2056. # dtype[Any], Type[object]]"
  2057. dtype = {col_name: dtype for col_name in frame} # type: ignore[misc]
  2058. else:
  2059. dtype = cast(dict, dtype)
  2060. for col, my_type in dtype.items():
  2061. if not isinstance(my_type, str):
  2062. raise ValueError(f"{col} ({my_type}) not a string")
  2063. table = SQLiteTable(
  2064. name,
  2065. self,
  2066. frame=frame,
  2067. index=index,
  2068. if_exists=if_exists,
  2069. index_label=index_label,
  2070. dtype=dtype,
  2071. )
  2072. table.create()
  2073. return table.insert(chunksize, method)
  2074. def has_table(self, name: str, schema: str | None = None) -> bool:
  2075. wld = "?"
  2076. query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"
  2077. return len(self.execute(query, [name]).fetchall()) > 0
  2078. def get_table(self, table_name: str, schema: str | None = None) -> None:
  2079. return None # not supported in fallback mode
  2080. def drop_table(self, name: str, schema: str | None = None) -> None:
  2081. drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
  2082. self.execute(drop_sql)
  2083. def _create_sql_schema(
  2084. self,
  2085. frame,
  2086. table_name: str,
  2087. keys=None,
  2088. dtype: DtypeArg | None = None,
  2089. schema: str | None = None,
  2090. ):
  2091. table = SQLiteTable(
  2092. table_name,
  2093. self,
  2094. frame=frame,
  2095. index=False,
  2096. keys=keys,
  2097. dtype=dtype,
  2098. schema=schema,
  2099. )
  2100. return str(table.sql_schema())
  2101. def get_schema(
  2102. frame,
  2103. name: str,
  2104. keys=None,
  2105. con=None,
  2106. dtype: DtypeArg | None = None,
  2107. schema: str | None = None,
  2108. ) -> str:
  2109. """
  2110. Get the SQL db table schema for the given frame.
  2111. Parameters
  2112. ----------
  2113. frame : DataFrame
  2114. name : str
  2115. name of SQL table
  2116. keys : string or sequence, default: None
  2117. columns to use a primary key
  2118. con: an open SQL database connection object or a SQLAlchemy connectable
  2119. Using SQLAlchemy makes it possible to use any DB supported by that
  2120. library, default: None
  2121. If a DBAPI2 object, only sqlite3 is supported.
  2122. dtype : dict of column name to SQL type, default None
  2123. Optional specifying the datatype for columns. The SQL type should
  2124. be a SQLAlchemy type, or a string for sqlite3 fallback connection.
  2125. schema: str, default: None
  2126. Optional specifying the schema to be used in creating the table.
  2127. .. versionadded:: 1.2.0
  2128. """
  2129. with pandasSQL_builder(con=con) as pandas_sql:
  2130. return pandas_sql._create_sql_schema(
  2131. frame, name, keys=keys, dtype=dtype, schema=schema
  2132. )