- """
- Collection of query wrappers / abstractions to both facilitate data
- retrieval and to reduce dependency on DB-specific API.
- """
- from __future__ import annotations
- from abc import (
- ABC,
- abstractmethod,
- )
- from contextlib import (
- ExitStack,
- contextmanager,
- )
- from datetime import (
- date,
- datetime,
- time,
- )
- from functools import partial
- import re
- from typing import (
- TYPE_CHECKING,
- Any,
- Iterator,
- Literal,
- cast,
- overload,
- )
- import warnings
- import numpy as np
- from pandas._libs import lib
- from pandas._typing import (
- DateTimeErrorChoices,
- DtypeArg,
- DtypeBackend,
- IndexLabel,
- )
- from pandas.compat._optional import import_optional_dependency
- from pandas.errors import (
- AbstractMethodError,
- DatabaseError,
- )
- from pandas.util._exceptions import find_stack_level
- from pandas.util._validators import check_dtype_backend
- from pandas.core.dtypes.common import (
- is_datetime64tz_dtype,
- is_dict_like,
- is_integer,
- is_list_like,
- )
- from pandas.core.dtypes.dtypes import DatetimeTZDtype
- from pandas.core.dtypes.missing import isna
- from pandas import get_option
- from pandas.core.api import (
- DataFrame,
- Series,
- )
- from pandas.core.arrays import ArrowExtensionArray
- from pandas.core.base import PandasObject
- import pandas.core.common as com
- from pandas.core.internals.construction import convert_object_array
- from pandas.core.tools.datetimes import to_datetime
- if TYPE_CHECKING:
- from sqlalchemy import Table
- from sqlalchemy.sql.expression import (
- Select,
- TextClause,
- )
- # -----------------------------------------------------------------------------
- # -- Helper functions
- def _process_parse_dates_argument(parse_dates):
- """Process parse_dates argument for read_sql functions"""
- # handle non-list entries for parse_dates gracefully
- if parse_dates is True or parse_dates is None or parse_dates is False:
- parse_dates = []
- elif not hasattr(parse_dates, "__iter__"):
- parse_dates = [parse_dates]
- return parse_dates
- def _handle_date_column(
- col, utc: bool = False, format: str | dict[str, Any] | None = None
- ):
- if isinstance(format, dict):
- # GH35185 Allow custom error values in parse_dates argument of
- # read_sql like functions.
- # Format can take on custom to_datetime argument values such as
- # {"errors": "coerce"} or {"dayfirst": True}
- error: DateTimeErrorChoices = format.pop("errors", None) or "ignore"
- return to_datetime(col, errors=error, **format)
- else:
- # Allow passing of formatting string for integers
- # GH17855
- if format is None and (
- issubclass(col.dtype.type, np.floating)
- or issubclass(col.dtype.type, np.integer)
- ):
- format = "s"
- if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]:
- return to_datetime(col, errors="coerce", unit=format, utc=utc)
- elif is_datetime64tz_dtype(col.dtype):
- # coerce to UTC timezone
- # GH11216
- return to_datetime(col, utc=True)
- else:
- return to_datetime(col, errors="coerce", format=format, utc=utc)
- def _parse_date_columns(data_frame, parse_dates):
- """
- Force columns designated in parse_dates to be read as datetimes.
- Supports both string formatted and integer timestamp columns.
- """
- parse_dates = _process_parse_dates_argument(parse_dates)
- # we want to coerce datetime64_tz dtypes for now to UTC
- # we could in theory do a 'nice' conversion from a FixedOffset tz
- # GH11216
- for col_name, df_col in data_frame.items():
- if is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates:
- try:
- fmt = parse_dates[col_name]
- except TypeError:
- fmt = None
- data_frame[col_name] = _handle_date_column(df_col, format=fmt)
- return data_frame
- def _convert_arrays_to_dataframe(
- data,
- columns,
- coerce_float: bool = True,
- dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
- ) -> DataFrame:
- content = lib.to_object_array_tuples(data)
- arrays = convert_object_array(
- list(content.T),
- dtype=None,
- coerce_float=coerce_float,
- dtype_backend=dtype_backend,
- )
- if dtype_backend == "pyarrow":
- pa = import_optional_dependency("pyarrow")
- arrays = [
- ArrowExtensionArray(pa.array(arr, from_pandas=True)) for arr in arrays
- ]
- if arrays:
- df = DataFrame(dict(zip(list(range(len(columns))), arrays)))
- df.columns = columns
- return df
- else:
- return DataFrame(columns=columns)
- def _wrap_result(
- data,
- columns,
- index_col=None,
- coerce_float: bool = True,
- parse_dates=None,
- dtype: DtypeArg | None = None,
- dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
- ):
- """Wrap result set of query in a DataFrame."""
- frame = _convert_arrays_to_dataframe(data, columns, coerce_float, dtype_backend)
- if dtype:
- frame = frame.astype(dtype)
- frame = _parse_date_columns(frame, parse_dates)
- if index_col is not None:
- frame = frame.set_index(index_col)
- return frame
- def execute(sql, con, params=None):
- """
- Execute the given SQL query using the provided connection object.
- Parameters
- ----------
- sql : string
- SQL query to be executed.
- con : SQLAlchemy connection or sqlite3 connection
- If a DBAPI2 object, only sqlite3 is supported.
- params : list or tuple, optional, default: None
- List of parameters to pass to execute method.
- Returns
- -------
- Results Iterable
- """
- warnings.warn(
- "`pandas.io.sql.execute` is deprecated and "
- "will be removed in the future version.",
- FutureWarning,
- stacklevel=find_stack_level(),
- ) # GH50185
- sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore")
- if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Engine)):
- raise TypeError("pandas.io.sql.execute requires a connection") # GH50185
- with pandasSQL_builder(con, need_transaction=True) as pandas_sql:
- return pandas_sql.execute(sql, params)
- # -----------------------------------------------------------------------------
- # -- Read and write to DataFrames
- @overload
- def read_sql_table(
- table_name,
- con,
- schema=...,
- index_col: str | list[str] | None = ...,
- coerce_float=...,
- parse_dates: list[str] | dict[str, str] | None = ...,
- columns: list[str] | None = ...,
- chunksize: None = ...,
- dtype_backend: DtypeBackend | lib.NoDefault = ...,
- ) -> DataFrame:
- ...
- @overload
- def read_sql_table(
- table_name,
- con,
- schema=...,
- index_col: str | list[str] | None = ...,
- coerce_float=...,
- parse_dates: list[str] | dict[str, str] | None = ...,
- columns: list[str] | None = ...,
- chunksize: int = ...,
- dtype_backend: DtypeBackend | lib.NoDefault = ...,
- ) -> Iterator[DataFrame]:
- ...
- def read_sql_table(
- table_name: str,
- con,
- schema: str | None = None,
- index_col: str | list[str] | None = None,
- coerce_float: bool = True,
- parse_dates: list[str] | dict[str, str] | None = None,
- columns: list[str] | None = None,
- chunksize: int | None = None,
- dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
- ) -> DataFrame | Iterator[DataFrame]:
- """
- Read SQL database table into a DataFrame.
- Given a table name and a SQLAlchemy connectable, returns a DataFrame.
- This function does not support DBAPI connections.
- Parameters
- ----------
- table_name : str
- Name of SQL table in database.
- con : SQLAlchemy connectable or str
- A database URI can be provided as a str.
- SQLite DBAPI connections are not supported.
- schema : str, default None
- Name of SQL schema in database to query (if database flavor
- supports this). Uses default schema if None (default).
- index_col : str or list of str, optional, default: None
- Column(s) to set as index (MultiIndex).
- coerce_float : bool, default True
- Attempts to convert values of non-string, non-numeric objects (like
- decimal.Decimal) to floating point. Can result in loss of precision.
- parse_dates : list or dict, default None
- - List of column names to parse as dates.
- - Dict of ``{column_name: format string}`` where format string is
- strftime compatible in case of parsing string times or is one of
- (D, s, ns, ms, us) in case of parsing integer timestamps.
- - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
- to the keyword arguments of :func:`pandas.to_datetime`
- Especially useful with databases without native Datetime support,
- such as SQLite.
- columns : list, default None
- List of column names to select from SQL table.
- chunksize : int, default None
- If specified, returns an iterator where `chunksize` is the number of
- rows to include in each chunk.
- dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames
- Which dtype_backend to use, e.g. whether a DataFrame should have NumPy
- arrays, nullable dtypes are used for all dtypes that have a nullable
- implementation when "numpy_nullable" is set, pyarrow is used for all
- dtypes if "pyarrow" is set.
- The dtype_backends are still experimential.
- .. versionadded:: 2.0
- Returns
- -------
- DataFrame or Iterator[DataFrame]
- A SQL table is returned as a two-dimensional data structure with
- labeled axes.
- See Also
- --------
- read_sql_query : Read SQL query into a DataFrame.
- read_sql : Read SQL query or database table into a DataFrame.
- Notes
- -----
- Any datetime values with time zone information will be converted to UTC.
- Examples
- --------
- >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
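- A sketch of per-column date parsing; the URI, table, and column names
- are illustrative:
- >>> pd.read_sql_table('table_name', 'postgres:///db_name',
- ... parse_dates={'date_column': {'format': '%d/%m/%y'}}) # doctest:+SKIP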
- """
- check_dtype_backend(dtype_backend)
- if dtype_backend is lib.no_default:
- dtype_backend = "numpy" # type: ignore[assignment]
- with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql:
- if not pandas_sql.has_table(table_name):
- raise ValueError(f"Table {table_name} not found")
- table = pandas_sql.read_table(
- table_name,
- index_col=index_col,
- coerce_float=coerce_float,
- parse_dates=parse_dates,
- columns=columns,
- chunksize=chunksize,
- dtype_backend=dtype_backend,
- )
- if table is not None:
- return table
- else:
- raise ValueError(f"Table {table_name} not found", con)
- @overload
- def read_sql_query(
- sql,
- con,
- index_col: str | list[str] | None = ...,
- coerce_float=...,
- params: list[str] | dict[str, str] | None = ...,
- parse_dates: list[str] | dict[str, str] | None = ...,
- chunksize: None = ...,
- dtype: DtypeArg | None = ...,
- dtype_backend: DtypeBackend | lib.NoDefault = ...,
- ) -> DataFrame:
- ...
- @overload
- def read_sql_query(
- sql,
- con,
- index_col: str | list[str] | None = ...,
- coerce_float=...,
- params: list[str] | dict[str, str] | None = ...,
- parse_dates: list[str] | dict[str, str] | None = ...,
- chunksize: int = ...,
- dtype: DtypeArg | None = ...,
- dtype_backend: DtypeBackend | lib.NoDefault = ...,
- ) -> Iterator[DataFrame]:
- ...
- def read_sql_query(
- sql,
- con,
- index_col: str | list[str] | None = None,
- coerce_float: bool = True,
- params: list[str] | dict[str, str] | None = None,
- parse_dates: list[str] | dict[str, str] | None = None,
- chunksize: int | None = None,
- dtype: DtypeArg | None = None,
- dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
- ) -> DataFrame | Iterator[DataFrame]:
- """
- Read SQL query into a DataFrame.
- Returns a DataFrame corresponding to the result set of the query
- string. Optionally provide an `index_col` parameter to use one of the
- columns as the index; otherwise the default integer index will be used.
- Parameters
- ----------
- sql : str SQL query or SQLAlchemy Selectable (select or text object)
- SQL query to be executed.
- con : SQLAlchemy connectable, str, or sqlite3 connection
- Using SQLAlchemy makes it possible to use any DB supported by that
- library. If a DBAPI2 object, only sqlite3 is supported.
- index_col : str or list of str, optional, default: None
- Column(s) to set as index (MultiIndex).
- coerce_float : bool, default True
- Attempts to convert values of non-string, non-numeric objects (like
- decimal.Decimal) to floating point. Useful for SQL result sets.
- params : list, tuple or dict, optional, default: None
- List of parameters to pass to execute method. The syntax used
- to pass parameters is database driver dependent. Check your
- database driver documentation for which of the five syntax styles,
- described in PEP 249's paramstyle, is supported.
- E.g. for psycopg2, uses %(name)s so use params={'name': 'value'}.
- parse_dates : list or dict, default: None
- - List of column names to parse as dates.
- - Dict of ``{column_name: format string}`` where format string is
- strftime compatible in case of parsing string times, or is one of
- (D, s, ns, ms, us) in case of parsing integer timestamps.
- - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
- to the keyword arguments of :func:`pandas.to_datetime`
- Especially useful with databases without native Datetime support,
- such as SQLite.
- chunksize : int, default None
- If specified, return an iterator where `chunksize` is the number of
- rows to include in each chunk.
- dtype : Type name or dict of columns
- Data type for data or columns. E.g. np.float64 or
- {'a': np.float64, 'b': np.int32, 'c': 'Int64'}.
- .. versionadded:: 1.3.0
- dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames
- Which dtype_backend to use, e.g. whether a DataFrame should have NumPy
- arrays, nullable dtypes are used for all dtypes that have a nullable
- implementation when "numpy_nullable" is set, pyarrow is used for all
- dtypes if "pyarrow" is set.
- The dtype_backends are still experimential.
- .. versionadded:: 2.0
- Returns
- -------
- DataFrame or Iterator[DataFrame]
- See Also
- --------
- read_sql_table : Read SQL database table into a DataFrame.
- read_sql : Read SQL query or database table into a DataFrame.
- Notes
- -----
- Any datetime values with time zone information parsed via the `parse_dates`
- parameter will be converted to UTC.
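- Examples
- --------
- A minimal sketch against an in-memory SQLite connection; the
- ``test_data`` table and ``int_column`` column are assumed for
- illustration (sqlite3 uses the qmark paramstyle):
- >>> from sqlite3 import connect
- >>> conn = connect(':memory:')
- >>> pd.read_sql_query('SELECT * FROM test_data WHERE int_column > ?',
- ... conn,
- ... params=(0,)) # doctest:+SKIP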
- """
- check_dtype_backend(dtype_backend)
- if dtype_backend is lib.no_default:
- dtype_backend = "numpy" # type: ignore[assignment]
- with pandasSQL_builder(con) as pandas_sql:
- return pandas_sql.read_query(
- sql,
- index_col=index_col,
- params=params,
- coerce_float=coerce_float,
- parse_dates=parse_dates,
- chunksize=chunksize,
- dtype=dtype,
- dtype_backend=dtype_backend,
- )
- @overload
- def read_sql(
- sql,
- con,
- index_col: str | list[str] | None = ...,
- coerce_float=...,
- params=...,
- parse_dates=...,
- columns: list[str] = ...,
- chunksize: None = ...,
- dtype_backend: DtypeBackend | lib.NoDefault = ...,
- dtype: DtypeArg | None = ...,
- ) -> DataFrame:
- ...
- @overload
- def read_sql(
- sql,
- con,
- index_col: str | list[str] | None = ...,
- coerce_float=...,
- params=...,
- parse_dates=...,
- columns: list[str] = ...,
- chunksize: int = ...,
- dtype_backend: DtypeBackend | lib.NoDefault = ...,
- dtype: DtypeArg | None = ...,
- ) -> Iterator[DataFrame]:
- ...
- def read_sql(
- sql,
- con,
- index_col: str | list[str] | None = None,
- coerce_float: bool = True,
- params=None,
- parse_dates=None,
- columns: list[str] | None = None,
- chunksize: int | None = None,
- dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
- dtype: DtypeArg | None = None,
- ) -> DataFrame | Iterator[DataFrame]:
- """
- Read SQL query or database table into a DataFrame.
- This function is a convenience wrapper around ``read_sql_table`` and
- ``read_sql_query`` (for backward compatibility). It will delegate
- to the specific function depending on the provided input. A SQL query
- will be routed to ``read_sql_query``, while a database table name will
- be routed to ``read_sql_table``. Note that the delegated function might
- have more specific notes about their functionality not listed here.
- Parameters
- ----------
- sql : str or SQLAlchemy Selectable (select or text object)
- SQL query to be executed or a table name.
- con : SQLAlchemy connectable, str, or sqlite3 connection
- Using SQLAlchemy makes it possible to use any DB supported by that
- library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
- for engine disposal and connection closure for the SQLAlchemy connectable; str
- connections are closed automatically. See
- `here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
- index_col : str or list of str, optional, default: None
- Column(s) to set as index (MultiIndex).
- coerce_float : bool, default True
- Attempts to convert values of non-string, non-numeric objects (like
- decimal.Decimal) to floating point, useful for SQL result sets.
- params : list, tuple or dict, optional, default: None
- List of parameters to pass to execute method. The syntax used
- to pass parameters is database driver dependent. Check your
- database driver documentation for which of the five syntax styles,
- described in PEP 249's paramstyle, is supported.
- E.g. for psycopg2, uses %(name)s so use params={'name': 'value'}.
- parse_dates : list or dict, default: None
- - List of column names to parse as dates.
- - Dict of ``{column_name: format string}`` where format string is
- strftime compatible in case of parsing string times, or is one of
- (D, s, ns, ms, us) in case of parsing integer timestamps.
- - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
- to the keyword arguments of :func:`pandas.to_datetime`
- Especially useful with databases without native Datetime support,
- such as SQLite.
- columns : list, default: None
- List of column names to select from SQL table (only used when reading
- a table).
- chunksize : int, default None
- If specified, return an iterator where `chunksize` is the
- number of rows to include in each chunk.
- dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames
- Which dtype_backend to use. When "numpy_nullable" is set, nullable
- dtypes are used for all dtypes that have a nullable implementation;
- when "pyarrow" is set, pyarrow is used for all dtypes.
- The dtype_backends are still experimental.
- .. versionadded:: 2.0
- dtype : Type name or dict of columns
- Data type for data or columns. E.g. np.float64 or
- {'a': np.float64, 'b': np.int32, 'c': 'Int64'}.
- The argument is ignored if a table is passed instead of a query.
- .. versionadded:: 2.0.0
- Returns
- -------
- DataFrame or Iterator[DataFrame]
- See Also
- --------
- read_sql_table : Read SQL database table into a DataFrame.
- read_sql_query : Read SQL query into a DataFrame.
- Examples
- --------
- Read data from SQL via either a SQL query or a SQL tablename.
- When using a SQLite database, only SQL queries are accepted;
- providing only the SQL tablename will result in an error.
- >>> from sqlite3 import connect
- >>> conn = connect(':memory:')
- >>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']],
- ... columns=['int_column', 'date_column'])
- >>> df.to_sql('test_data', conn)
- 2
- >>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn)
- int_column date_column
- 0 0 10/11/12
- 1 1 12/11/10
- >>> pd.read_sql('test_data', 'postgres:///db_name') # doctest:+SKIP
- Apply date parsing to columns through the ``parse_dates`` argument.
- The ``parse_dates`` argument calls ``pd.to_datetime`` on the provided columns.
- Custom argument values for applying ``pd.to_datetime`` on a column are specified
- via a dictionary format:
- >>> pd.read_sql('SELECT int_column, date_column FROM test_data',
- ... conn,
- ... parse_dates={"date_column": {"format": "%d/%m/%y"}})
- int_column date_column
- 0 0 2012-11-10
- 1 1 2010-11-12
- """
- check_dtype_backend(dtype_backend)
- if dtype_backend is lib.no_default:
- dtype_backend = "numpy" # type: ignore[assignment]
- with pandasSQL_builder(con) as pandas_sql:
- if isinstance(pandas_sql, SQLiteDatabase):
- return pandas_sql.read_query(
- sql,
- index_col=index_col,
- params=params,
- coerce_float=coerce_float,
- parse_dates=parse_dates,
- chunksize=chunksize,
- dtype_backend=dtype_backend, # type: ignore[arg-type]
- dtype=dtype,
- )
- try:
- _is_table_name = pandas_sql.has_table(sql)
- except Exception:
- # using generic exception to catch errors from sql drivers (GH24988)
- _is_table_name = False
- if _is_table_name:
- return pandas_sql.read_table(
- sql,
- index_col=index_col,
- coerce_float=coerce_float,
- parse_dates=parse_dates,
- columns=columns,
- chunksize=chunksize,
- dtype_backend=dtype_backend,
- )
- else:
- return pandas_sql.read_query(
- sql,
- index_col=index_col,
- params=params,
- coerce_float=coerce_float,
- parse_dates=parse_dates,
- chunksize=chunksize,
- dtype_backend=dtype_backend,
- dtype=dtype,
- )
- def to_sql(
- frame,
- name: str,
- con,
- schema: str | None = None,
- if_exists: Literal["fail", "replace", "append"] = "fail",
- index: bool = True,
- index_label: IndexLabel = None,
- chunksize: int | None = None,
- dtype: DtypeArg | None = None,
- method: str | None = None,
- engine: str = "auto",
- **engine_kwargs,
- ) -> int | None:
- """
- Write records stored in a DataFrame to a SQL database.
- Parameters
- ----------
- frame : DataFrame, Series
- name : str
- Name of SQL table.
- con : SQLAlchemy connectable(engine/connection) or database string URI
- or sqlite3 DBAPI2 connection
- Using SQLAlchemy makes it possible to use any DB supported by that
- library.
- If a DBAPI2 object, only sqlite3 is supported.
- schema : str, optional
- Name of SQL schema in database to write to (if database flavor
- supports this). If None, use default schema (default).
- if_exists : {'fail', 'replace', 'append'}, default 'fail'
- - fail: If table exists, raise a ValueError.
- - replace: If table exists, drop it, recreate it, and insert data.
- - append: If table exists, insert data. Create if does not exist.
- index : bool, default True
- Write DataFrame index as a column.
- index_label : str or sequence, optional
- Column label for index column(s). If None is given (default) and
- `index` is True, then the index names are used.
- A sequence should be given if the DataFrame uses MultiIndex.
- chunksize : int, optional
- Specify the number of rows in each batch to be written at a time.
- By default, all rows will be written at once.
- dtype : dict or scalar, optional
- Specifying the datatype for columns. If a dictionary is used, the
- keys should be the column names and the values should be the
- SQLAlchemy types or strings for the sqlite3 fallback mode. If a
- scalar is provided, it will be applied to all columns.
- method : {None, 'multi', callable}, optional
- Controls the SQL insertion clause used:
- - None : Uses standard SQL ``INSERT`` clause (one per row).
- - ``'multi'``: Pass multiple values in a single ``INSERT`` clause.
- - callable with signature ``(pd_table, conn, keys, data_iter) -> int | None``.
- Details and a sample callable implementation can be found in the
- section :ref:`insert method <io.sql.method>`.
- engine : {'auto', 'sqlalchemy'}, default 'auto'
- SQL engine library to use. If 'auto', then the option
- ``io.sql.engine`` is used. The default ``io.sql.engine``
- behavior is 'sqlalchemy'.
- .. versionadded:: 1.3.0
- **engine_kwargs
- Any additional kwargs are passed to the engine.
- Returns
- -------
- None or int
- Number of rows affected by to_sql. None is returned if the callable
- passed into ``method`` does not return an integer number of rows.
- .. versionadded:: 1.4.0
- Notes
- -----
- The returned rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor``
- or SQLAlchemy connectable. The returned value may not reflect the exact number of written
- rows as stipulated in the
- `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or
- `SQLAlchemy <https://docs.sqlalchemy.org/en/14/core/connections.html#sqlalchemy.engine.BaseCursorResult.rowcount>`__
- documentation.
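- Examples
- --------
- A minimal sketch with an in-memory SQLite connection; the table name
- 'test_data' is illustrative, and the return value is the reported
- number of inserted rows:
- >>> from sqlite3 import connect
- >>> from pandas.io.sql import to_sql
- >>> conn = connect(':memory:')
- >>> df = pd.DataFrame({'int_column': [0, 1]})
- >>> to_sql(df, 'test_data', conn, index=False) # doctest:+SKIP
- 2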
- """ # noqa:E501
- if if_exists not in ("fail", "replace", "append"):
- raise ValueError(f"'{if_exists}' is not valid for if_exists")
- if isinstance(frame, Series):
- frame = frame.to_frame()
- elif not isinstance(frame, DataFrame):
- raise NotImplementedError(
- "'frame' argument should be either a Series or a DataFrame"
- )
- with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql:
- return pandas_sql.to_sql(
- frame,
- name,
- if_exists=if_exists,
- index=index,
- index_label=index_label,
- schema=schema,
- chunksize=chunksize,
- dtype=dtype,
- method=method,
- engine=engine,
- **engine_kwargs,
- )
- def has_table(table_name: str, con, schema: str | None = None) -> bool:
- """
- Check if the database has the named table.
- Parameters
- ----------
- table_name : str
- Name of SQL table.
- con : SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
- Using SQLAlchemy makes it possible to use any DB supported by that
- library.
- If a DBAPI2 object, only sqlite3 is supported.
- schema : string, default None
- Name of SQL schema in database to write to (if database flavor supports
- this). If None, use default schema (default).
- Returns
- -------
- bool
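- Examples
- --------
- A short sketch assuming an in-memory SQLite connection; the table name
- 'test_data' is illustrative:
- >>> from sqlite3 import connect
- >>> from pandas.io.sql import has_table
- >>> conn = connect(':memory:')
- >>> has_table('test_data', conn) # doctest:+SKIP
- False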
- """
- with pandasSQL_builder(con, schema=schema) as pandas_sql:
- return pandas_sql.has_table(table_name)
- table_exists = has_table
- def pandasSQL_builder(
- con,
- schema: str | None = None,
- need_transaction: bool = False,
- ) -> PandasSQL:
- """
- Convenience function to return the correct PandasSQL subclass based on the
- provided parameters. Also creates a sqlalchemy connection and transaction
- if necessary.
- """
- import sqlite3
- if isinstance(con, sqlite3.Connection) or con is None:
- return SQLiteDatabase(con)
- sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore")
- if isinstance(con, str) and sqlalchemy is None:
- raise ImportError("Using URI string without sqlalchemy installed.")
- if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Connectable)):
- return SQLDatabase(con, schema, need_transaction)
- warnings.warn(
- "pandas only supports SQLAlchemy connectable (engine/connection) or "
- "database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 "
- "objects are not tested. Please consider using SQLAlchemy.",
- UserWarning,
- stacklevel=find_stack_level(),
- )
- return SQLiteDatabase(con)
- class SQLTable(PandasObject):
- """
- For mapping Pandas tables to SQL tables.
- Uses fact that table is reflected by SQLAlchemy to
- do better type conversions.
- Also holds various flags needed to avoid having to
- pass them between functions all the time.
- """
- # TODO: support for multiIndex
- def __init__(
- self,
- name: str,
- pandas_sql_engine,
- frame=None,
- index: bool | str | list[str] | None = True,
- if_exists: Literal["fail", "replace", "append"] = "fail",
- prefix: str = "pandas",
- index_label=None,
- schema=None,
- keys=None,
- dtype: DtypeArg | None = None,
- ) -> None:
- self.name = name
- self.pd_sql = pandas_sql_engine
- self.prefix = prefix
- self.frame = frame
- self.index = self._index_name(index, index_label)
- self.schema = schema
- self.if_exists = if_exists
- self.keys = keys
- self.dtype = dtype
- if frame is not None:
- # We want to initialize based on a dataframe
- self.table = self._create_table_setup()
- else:
- # no data provided, read-only mode
- self.table = self.pd_sql.get_table(self.name, self.schema)
- if self.table is None:
- raise ValueError(f"Could not init table '{name}'")
- def exists(self):
- return self.pd_sql.has_table(self.name, self.schema)
- def sql_schema(self) -> str:
- from sqlalchemy.schema import CreateTable
- return str(CreateTable(self.table).compile(self.pd_sql.con))
- def _execute_create(self) -> None:
- # Inserting table into database, add to MetaData object
- self.table = self.table.to_metadata(self.pd_sql.meta)
- with self.pd_sql.run_transaction():
- self.table.create(bind=self.pd_sql.con)
- def create(self) -> None:
- if self.exists():
- if self.if_exists == "fail":
- raise ValueError(f"Table '{self.name}' already exists.")
- if self.if_exists == "replace":
- self.pd_sql.drop_table(self.name, self.schema)
- self._execute_create()
- elif self.if_exists == "append":
- pass
- else:
- raise ValueError(f"'{self.if_exists}' is not valid for if_exists")
- else:
- self._execute_create()
- def _execute_insert(self, conn, keys: list[str], data_iter) -> int:
- """
- Execute SQL statement inserting data
- Parameters
- ----------
- conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
- keys : list of str
- Column names
- data_iter : generator of list
- Each item contains a list of values to be inserted
- """
- data = [dict(zip(keys, row)) for row in data_iter]
- result = conn.execute(self.table.insert(), data)
- return result.rowcount
- def _execute_insert_multi(self, conn, keys: list[str], data_iter) -> int:
- """
- Alternative to _execute_insert for DBs that support multi-value INSERT.
- Note: multi-value insert is usually faster for analytics DBs
- and tables containing a few columns,
- but performance degrades quickly as the number of columns increases.
- """
- from sqlalchemy import insert
- data = [dict(zip(keys, row)) for row in data_iter]
- stmt = insert(self.table).values(data)
- result = conn.execute(stmt)
- return result.rowcount
- def insert_data(self) -> tuple[list[str], list[np.ndarray]]:
- if self.index is not None:
- temp = self.frame.copy()
- temp.index.names = self.index
- try:
- temp.reset_index(inplace=True)
- except ValueError as err:
- raise ValueError(f"duplicate name in index/columns: {err}") from err
- else:
- temp = self.frame
- column_names = list(map(str, temp.columns))
- ncols = len(column_names)
- # this just pre-allocates the list: None's will be replaced with ndarrays
- # error: List item 0 has incompatible type "None"; expected "ndarray"
- data_list: list[np.ndarray] = [None] * ncols # type: ignore[list-item]
- for i, (_, ser) in enumerate(temp.items()):
- if ser.dtype.kind == "M":
- d = ser.dt.to_pydatetime()
- elif ser.dtype.kind == "m":
- vals = ser._values
- if isinstance(vals, ArrowExtensionArray):
- vals = vals.to_numpy(dtype=np.dtype("m8[ns]"))
- # store as integers, see GH#6921, GH#7076
- d = vals.view("i8").astype(object)
- else:
- d = ser._values.astype(object)
- assert isinstance(d, np.ndarray), type(d)
- if ser._can_hold_na:
- # Note: this will miss timedeltas since they are converted to int
- mask = isna(d)
- d[mask] = None
- data_list[i] = d
- return column_names, data_list
- def insert(
- self, chunksize: int | None = None, method: str | None = None
- ) -> int | None:
- # set insert method
- if method is None:
- exec_insert = self._execute_insert
- elif method == "multi":
- exec_insert = self._execute_insert_multi
- elif callable(method):
- exec_insert = partial(method, self)
- else:
- raise ValueError(f"Invalid parameter `method`: {method}")
- keys, data_list = self.insert_data()
- nrows = len(self.frame)
- if nrows == 0:
- return 0
- if chunksize is None:
- chunksize = nrows
- elif chunksize == 0:
- raise ValueError("chunksize argument should be non-zero")
- chunks = (nrows // chunksize) + 1
- total_inserted = None
- with self.pd_sql.run_transaction() as conn:
- for i in range(chunks):
- start_i = i * chunksize
- end_i = min((i + 1) * chunksize, nrows)
- if start_i >= end_i:
- break
- chunk_iter = zip(*(arr[start_i:end_i] for arr in data_list))
- num_inserted = exec_insert(conn, keys, chunk_iter)
- # GH 46891
- if is_integer(num_inserted):
- if total_inserted is None:
- total_inserted = num_inserted
- else:
- total_inserted += num_inserted
- return total_inserted
- def _query_iterator(
- self,
- result,
- exit_stack: ExitStack,
- chunksize: int | None,
- columns,
- coerce_float: bool = True,
- parse_dates=None,
- dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
- ):
- """Return generator through chunked result set."""
- has_read_data = False
- with exit_stack:
- while True:
- data = result.fetchmany(chunksize)
- if not data:
- if not has_read_data:
- yield DataFrame.from_records(
- [], columns=columns, coerce_float=coerce_float
- )
- break
- has_read_data = True
- self.frame = _convert_arrays_to_dataframe(
- data, columns, coerce_float, dtype_backend
- )
- self._harmonize_columns(
- parse_dates=parse_dates, dtype_backend=dtype_backend
- )
- if self.index is not None:
- self.frame.set_index(self.index, inplace=True)
- yield self.frame
- def read(
- self,
- exit_stack: ExitStack,
- coerce_float: bool = True,
- parse_dates=None,
- columns=None,
- chunksize=None,
- dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
- ) -> DataFrame | Iterator[DataFrame]:
- from sqlalchemy import select
- if columns is not None and len(columns) > 0:
- cols = [self.table.c[n] for n in columns]
- if self.index is not None:
- for idx in self.index[::-1]:
- cols.insert(0, self.table.c[idx])
- sql_select = select(*cols)
- else:
- sql_select = select(self.table)
- result = self.pd_sql.execute(sql_select)
- column_names = result.keys()
- if chunksize is not None:
- return self._query_iterator(
- result,
- exit_stack,
- chunksize,
- column_names,
- coerce_float=coerce_float,
- parse_dates=parse_dates,
- dtype_backend=dtype_backend,
- )
- else:
- data = result.fetchall()
- self.frame = _convert_arrays_to_dataframe(
- data, column_names, coerce_float, dtype_backend
- )
- self._harmonize_columns(
- parse_dates=parse_dates, dtype_backend=dtype_backend
- )
- if self.index is not None:
- self.frame.set_index(self.index, inplace=True)
- return self.frame
- def _index_name(self, index, index_label):
- # for writing: index=True to include index in sql table
- if index is True:
- nlevels = self.frame.index.nlevels
- # if index_label is specified, set this as index name(s)
- if index_label is not None:
- if not isinstance(index_label, list):
- index_label = [index_label]
- if len(index_label) != nlevels:
- raise ValueError(
- "Length of 'index_label' should match number of "
- f"levels, which is {nlevels}"
- )
- return index_label
- # return the used column labels for the index columns
- if (
- nlevels == 1
- and "index" not in self.frame.columns
- and self.frame.index.name is None
- ):
- return ["index"]
- else:
- return com.fill_missing_names(self.frame.index.names)
- # for reading: index=(list of) string to specify column to set as index
- elif isinstance(index, str):
- return [index]
- elif isinstance(index, list):
- return index
- else:
- return None
- def _get_column_names_and_types(self, dtype_mapper):
- column_names_and_types = []
- if self.index is not None:
- for i, idx_label in enumerate(self.index):
- idx_type = dtype_mapper(self.frame.index._get_level_values(i))
- column_names_and_types.append((str(idx_label), idx_type, True))
- column_names_and_types += [
- (str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False)
- for i in range(len(self.frame.columns))
- ]
- return column_names_and_types
- def _create_table_setup(self):
- from sqlalchemy import (
- Column,
- PrimaryKeyConstraint,
- Table,
- )
- from sqlalchemy.schema import MetaData
- column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type)
- columns: list[Any] = [
- Column(name, typ, index=is_index)
- for name, typ, is_index in column_names_and_types
- ]
- if self.keys is not None:
- if not is_list_like(self.keys):
- keys = [self.keys]
- else:
- keys = self.keys
- pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk")
- columns.append(pkc)
- schema = self.schema or self.pd_sql.meta.schema
- # At this point, attach to a new MetaData object; only attach to
- # self.meta once the table has been created.
- meta = MetaData()
- return Table(self.name, meta, *columns, schema=schema)
- def _harmonize_columns(
- self,
- parse_dates=None,
- dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
- ) -> None:
- """
- Make the DataFrame's column types align with the SQL table
- column types.
- Need to work around limited NA value support. Floats are always
- fine; ints must become floats if there are null values.
- Booleans are hard because converting a bool column with None replaces
- all Nones with False. Therefore only convert bool if there are no
- NA values.
- Datetimes should already be converted to np.datetime64 if supported,
- but here we also force conversion if required.
- """
- parse_dates = _process_parse_dates_argument(parse_dates)
- for sql_col in self.table.columns:
- col_name = sql_col.name
- try:
- df_col = self.frame[col_name]
- # Handle date parsing upfront; don't try to convert columns
- # twice
- if col_name in parse_dates:
- try:
- fmt = parse_dates[col_name]
- except TypeError:
- fmt = None
- self.frame[col_name] = _handle_date_column(df_col, format=fmt)
- continue
- # the type the dataframe column should have
- col_type = self._get_dtype(sql_col.type)
- if (
- col_type is datetime
- or col_type is date
- or col_type is DatetimeTZDtype
- ):
- # Convert tz-aware Datetime SQL columns to UTC
- utc = col_type is DatetimeTZDtype
- self.frame[col_name] = _handle_date_column(df_col, utc=utc)
- elif dtype_backend == "numpy" and col_type is float:
- # floats support NA, can always convert!
- self.frame[col_name] = df_col.astype(col_type, copy=False)
- elif dtype_backend == "numpy" and len(df_col) == df_col.count():
- # No NA values, can convert ints and bools
- if col_type is np.dtype("int64") or col_type is bool:
- self.frame[col_name] = df_col.astype(col_type, copy=False)
- except KeyError:
- pass # this column not in results
- def _sqlalchemy_type(self, col):
- dtype: DtypeArg = self.dtype or {}
- if is_dict_like(dtype):
- dtype = cast(dict, dtype)
- if col.name in dtype:
- return dtype[col.name]
- # Infer type of column, while ignoring missing values.
- # Needed for inserting typed data containing NULLs, GH 8778.
- col_type = lib.infer_dtype(col, skipna=True)
- from sqlalchemy.types import (
- TIMESTAMP,
- BigInteger,
- Boolean,
- Date,
- DateTime,
- Float,
- Integer,
- SmallInteger,
- Text,
- Time,
- )
- if col_type in ("datetime64", "datetime"):
- # GH 9086: TIMESTAMP is the suggested type if the column contains
- # timezone information
- try:
- if col.dt.tz is not None:
- return TIMESTAMP(timezone=True)
- except AttributeError:
- # The column is actually a DatetimeIndex
- # GH 26761 or an Index with date-like data e.g. 9999-01-01
- if getattr(col, "tz", None) is not None:
- return TIMESTAMP(timezone=True)
- return DateTime
- if col_type == "timedelta64":
- warnings.warn(
- "the 'timedelta' type is not supported, and will be "
- "written as integer values (ns frequency) to the database.",
- UserWarning,
- stacklevel=find_stack_level(),
- )
- return BigInteger
- elif col_type == "floating":
- if col.dtype == "float32":
- return Float(precision=23)
- else:
- return Float(precision=53)
- elif col_type == "integer":
- # GH35076 Map pandas integer to optimal SQLAlchemy integer type
- if col.dtype.name.lower() in ("int8", "uint8", "int16"):
- return SmallInteger
- elif col.dtype.name.lower() in ("uint16", "int32"):
- return Integer
- elif col.dtype.name.lower() == "uint64":
- raise ValueError("Unsigned 64 bit integer datatype is not supported")
- else:
- return BigInteger
- elif col_type == "boolean":
- return Boolean
- elif col_type == "date":
- return Date
- elif col_type == "time":
- return Time
- elif col_type == "complex":
- raise ValueError("Complex datatypes not supported")
- return Text
- def _get_dtype(self, sqltype):
- from sqlalchemy.types import (
- TIMESTAMP,
- Boolean,
- Date,
- DateTime,
- Float,
- Integer,
- )
- if isinstance(sqltype, Float):
- return float
- elif isinstance(sqltype, Integer):
- # TODO: Refine integer size.
- return np.dtype("int64")
- elif isinstance(sqltype, TIMESTAMP):
- # we have a timezone capable type
- if not sqltype.timezone:
- return datetime
- return DatetimeTZDtype
- elif isinstance(sqltype, DateTime):
- # Caution: np.datetime64 is also a subclass of np.number.
- return datetime
- elif isinstance(sqltype, Date):
- return date
- elif isinstance(sqltype, Boolean):
- return bool
- return object
- class PandasSQL(PandasObject, ABC):
- """
- Subclasses should define read_query and to_sql.
- """
- def __enter__(self):
- return self
- def __exit__(self, *args) -> None:
- pass
- def read_table(
- self,
- table_name: str,
- index_col: str | list[str] | None = None,
- coerce_float: bool = True,
- parse_dates=None,
- columns=None,
- schema: str | None = None,
- chunksize: int | None = None,
- dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
- ) -> DataFrame | Iterator[DataFrame]:
- raise NotImplementedError
- @abstractmethod
- def read_query(
- self,
- sql: str,
- index_col: str | list[str] | None = None,
- coerce_float: bool = True,
- parse_dates=None,
- params=None,
- chunksize: int | None = None,
- dtype: DtypeArg | None = None,
- dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
- ) -> DataFrame | Iterator[DataFrame]:
- pass
- @abstractmethod
- def to_sql(
- self,
- frame,
- name,
- if_exists: Literal["fail", "replace", "append"] = "fail",
- index: bool = True,
- index_label=None,
- schema=None,
- chunksize=None,
- dtype: DtypeArg | None = None,
- method=None,
- engine: str = "auto",
- **engine_kwargs,
- ) -> int | None:
- pass
- @abstractmethod
- def execute(self, sql: str | Select | TextClause, params=None):
- pass
- @abstractmethod
- def has_table(self, name: str, schema: str | None = None) -> bool:
- pass
- @abstractmethod
- def _create_sql_schema(
- self,
- frame: DataFrame,
- table_name: str,
- keys: list[str] | None = None,
- dtype: DtypeArg | None = None,
- schema: str | None = None,
- ):
- pass
- class BaseEngine:
- def insert_records(
- self,
- table: SQLTable,
- con,
- frame,
- name,
- index: bool | str | list[str] | None = True,
- schema=None,
- chunksize=None,
- method=None,
- **engine_kwargs,
- ) -> int | None:
- """
- Inserts data into an already-prepared table.
- """
- raise AbstractMethodError(self)
- class SQLAlchemyEngine(BaseEngine):
- def __init__(self) -> None:
- import_optional_dependency(
- "sqlalchemy", extra="sqlalchemy is required for SQL support."
- )
- def insert_records(
- self,
- table: SQLTable,
- con,
- frame,
- name,
- index: bool | str | list[str] | None = True,
- schema=None,
- chunksize=None,
- method=None,
- **engine_kwargs,
- ) -> int | None:
- from sqlalchemy import exc
- try:
- return table.insert(chunksize=chunksize, method=method)
- except exc.StatementError as err:
- # GH34431
- # https://stackoverflow.com/a/67358288/6067848
- msg = r"""(\(1054, "Unknown column 'inf(e0)?' in 'field list'"\))(?#
- )|inf can not be used with MySQL"""
- err_text = str(err.orig)
- if re.search(msg, err_text):
- raise ValueError("inf cannot be used with MySQL") from err
- raise err
- def get_engine(engine: str) -> BaseEngine:
- """return our implementation"""
- if engine == "auto":
- engine = get_option("io.sql.engine")
- if engine == "auto":
- # try engines in this order
- engine_classes = [SQLAlchemyEngine]
- error_msgs = ""
- for engine_class in engine_classes:
- try:
- return engine_class()
- except ImportError as err:
- error_msgs += "\n - " + str(err)
- raise ImportError(
- "Unable to find a usable engine; "
- "tried using: 'sqlalchemy'.\n"
- "A suitable version of "
- "sqlalchemy is required for sql I/O "
- "support.\n"
- "Trying to import the above resulted in these errors:"
- f"{error_msgs}"
- )
- if engine == "sqlalchemy":
- return SQLAlchemyEngine()
- raise ValueError("engine must be one of 'auto', 'sqlalchemy'")
- class SQLDatabase(PandasSQL):
- """
- This class enables conversion between DataFrame and SQL databases
- using SQLAlchemy to handle database abstraction.
- Parameters
- ----------
- con : SQLAlchemy Connectable or URI string.
- Connectable to connect with the database. Using SQLAlchemy makes it
- possible to use any DB supported by that library.
- schema : string, default None
- Name of SQL schema in database to write to (if database flavor
- supports this). If None, use default schema (default).
- need_transaction : bool, default False
- If True, SQLDatabase will create a transaction.
- """
- def __init__(
- self, con, schema: str | None = None, need_transaction: bool = False
- ) -> None:
- from sqlalchemy import create_engine
- from sqlalchemy.engine import Engine
- from sqlalchemy.schema import MetaData
- # self.exit_stack cleans up the Engine and Connection and commits the
- # transaction if any of those objects was created below.
- # Cleanup happens either in self.__exit__ or at the end of the iterator
- # returned by read_sql when chunksize is not None.
- self.exit_stack = ExitStack()
- if isinstance(con, str):
- con = create_engine(con)
- self.exit_stack.callback(con.dispose)
- if isinstance(con, Engine):
- con = self.exit_stack.enter_context(con.connect())
- if need_transaction and not con.in_transaction():
- self.exit_stack.enter_context(con.begin())
- self.con = con
- self.meta = MetaData(schema=schema)
- self.returns_generator = False
- def __exit__(self, *args) -> None:
- if not self.returns_generator:
- self.exit_stack.close()
- @contextmanager
- def run_transaction(self):
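- # open a new transaction only if the connection is not already in
- # one; otherwise participate in the caller's transaction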
- if not self.con.in_transaction():
- with self.con.begin():
- yield self.con
- else:
- yield self.con
- def execute(self, sql: str | Select | TextClause, params=None):
- """Simple passthrough to SQLAlchemy connectable"""
- args = [] if params is None else [params]
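- # plain strings are sent straight to the DBAPI driver via
- # exec_driver_sql; SQLAlchemy expression objects (Select/TextClause)
- # are compiled and dispatched through Connection.execute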
- if isinstance(sql, str):
- return self.con.exec_driver_sql(sql, *args)
- return self.con.execute(sql, *args)
- def read_table(
- self,
- table_name: str,
- index_col: str | list[str] | None = None,
- coerce_float: bool = True,
- parse_dates=None,
- columns=None,
- schema: str | None = None,
- chunksize: int | None = None,
- dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
- ) -> DataFrame | Iterator[DataFrame]:
- """
- Read SQL database table into a DataFrame.
- Parameters
- ----------
- table_name : str
- Name of SQL table in database.
- index_col : string, optional, default: None
- Column to set as index.
- coerce_float : bool, default True
- Attempts to convert values of non-string, non-numeric objects
- (like decimal.Decimal) to floating point. This can result in
- loss of precision.
- parse_dates : list or dict, default: None
- - List of column names to parse as dates.
- - Dict of ``{column_name: format string}`` where format string is
- strftime compatible in case of parsing string times, or is one of
- (D, s, ns, ms, us) in case of parsing integer timestamps.
- - Dict of ``{column_name: arg}``, where the arg corresponds
- to the keyword arguments of :func:`pandas.to_datetime`.
- Especially useful with databases without native Datetime support,
- such as SQLite.
- columns : list, default: None
- List of column names to select from SQL table.
- schema : string, default None
- Name of SQL schema in database to query (if database flavor
- supports this). If specified, this overwrites the default
- schema of the SQL database object.
- chunksize : int, default None
- If specified, return an iterator where `chunksize` is the number
- of rows to include in each chunk.
- dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy dtypes
- Which dtype_backend to use. When "numpy_nullable" is set, nullable
- dtypes are used for all dtypes that have a nullable implementation;
- when "pyarrow" is set, pyarrow is used for all dtypes.
- The dtype_backends are still experimental.
- .. versionadded:: 2.0
- Returns
- -------
- DataFrame
- See Also
- --------
- pandas.read_sql_table
- SQLDatabase.read_query
- """
- self.meta.reflect(bind=self.con, only=[table_name])
- table = SQLTable(table_name, self, index=index_col, schema=schema)
- if chunksize is not None:
- self.returns_generator = True
- return table.read(
- self.exit_stack,
- coerce_float=coerce_float,
- parse_dates=parse_dates,
- columns=columns,
- chunksize=chunksize,
- dtype_backend=dtype_backend,
- )
    @staticmethod
    def _query_iterator(
        result,
        exit_stack: ExitStack,
        chunksize: int,
        columns,
        index_col=None,
        coerce_float: bool = True,
        parse_dates=None,
        dtype: DtypeArg | None = None,
        dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
    ):
        """Return generator through chunked result set"""
        has_read_data = False
        with exit_stack:
            while True:
                data = result.fetchmany(chunksize)
                if not data:
                    if not has_read_data:
                        yield _wrap_result(
                            [],
                            columns,
                            index_col=index_col,
                            coerce_float=coerce_float,
                            parse_dates=parse_dates,
                            dtype=dtype,
                            dtype_backend=dtype_backend,
                        )
                    break

                has_read_data = True
                yield _wrap_result(
                    data,
                    columns,
                    index_col=index_col,
                    coerce_float=coerce_float,
                    parse_dates=parse_dates,
                    dtype=dtype,
                    dtype_backend=dtype_backend,
                )
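
    # Note on the pattern above: the generator owns ``exit_stack``, so any
    # connection resources registered on it are released when the generator
    # is exhausted, closed, or garbage-collected -- not when ``read_query``
    # or ``read_table`` returns.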
    def read_query(
        self,
        sql: str,
        index_col: str | list[str] | None = None,
        coerce_float: bool = True,
        parse_dates=None,
        params=None,
        chunksize: int | None = None,
        dtype: DtypeArg | None = None,
        dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
    ) -> DataFrame | Iterator[DataFrame]:
        """
        Read SQL query into a DataFrame.

        Parameters
        ----------
        sql : str
            SQL query to be executed.
        index_col : string, optional, default: None
            Column name to use as index for the returned DataFrame object.
        coerce_float : bool, default True
            Attempt to convert values of non-string, non-numeric objects (like
            decimal.Decimal) to floating point, useful for SQL result sets.
        params : list, tuple or dict, optional, default: None
            List of parameters to pass to execute method. The syntax used
            to pass parameters is database driver dependent. Check your
            database driver documentation for which of the five syntax styles,
            described in PEP 249's paramstyle, is supported.
            E.g. for psycopg2, which uses ``%(name)s``, pass
            ``params={'name': 'value'}``.
        parse_dates : list or dict, default: None
            - List of column names to parse as dates.
            - Dict of ``{column_name: format string}`` where format string is
              strftime compatible in case of parsing string times, or is one of
              (D, s, ns, ms, us) in case of parsing integer timestamps.
            - Dict of ``{column_name: arg dict}``, where the arg dict
              corresponds to the keyword arguments of
              :func:`pandas.to_datetime`. Especially useful with databases
              without native Datetime support, such as SQLite.
        chunksize : int, default None
            If specified, return an iterator where `chunksize` is the number
            of rows to include in each chunk.
        dtype : Type name or dict of columns
            Data type for data or columns. E.g. ``np.float64`` or
            ``{'a': np.float64, 'b': np.int32, 'c': 'Int64'}``.

            .. versionadded:: 1.3.0

        Returns
        -------
        DataFrame

        See Also
        --------
        read_sql_table : Read SQL database table into a DataFrame.
        read_sql
        """
        result = self.execute(sql, params)
        columns = result.keys()

        if chunksize is not None:
            self.returns_generator = True
            return self._query_iterator(
                result,
                self.exit_stack,
                chunksize,
                columns,
                index_col=index_col,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
                dtype=dtype,
                dtype_backend=dtype_backend,
            )
        else:
            data = result.fetchall()
            frame = _wrap_result(
                data,
                columns,
                index_col=index_col,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
                dtype=dtype,
                dtype_backend=dtype_backend,
            )
            return frame

    read_sql = read_query
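
    # Usage sketch for ``read_query`` (illustrative; parameter syntax is
    # driver-dependent, and the ``%(name)s`` style below assumes psycopg2):
    #
    #     frame = db.read_query(
    #         "SELECT * FROM my_table WHERE id = %(id)s",
    #         params={"id": 1},
    #     )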
    def prep_table(
        self,
        frame,
        name,
        if_exists: Literal["fail", "replace", "append"] = "fail",
        index: bool | str | list[str] | None = True,
        index_label=None,
        schema=None,
        dtype: DtypeArg | None = None,
    ) -> SQLTable:
        """
        Prepare the table in the database for data insertion, creating it
        if it does not already exist.
        """
        if dtype:
            if not is_dict_like(dtype):
                # error: Value expression in dictionary comprehension has incompatible
                # type "Union[ExtensionDtype, str, dtype[Any], Type[object],
                # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
                # Type[str], Type[float], Type[int], Type[complex], Type[bool],
                # Type[object]]]]"; expected type "Union[ExtensionDtype, str,
                # dtype[Any], Type[object]]"
                dtype = {col_name: dtype for col_name in frame}  # type: ignore[misc]
            else:
                dtype = cast(dict, dtype)

            from sqlalchemy.types import TypeEngine

            for col, my_type in dtype.items():
                if isinstance(my_type, type) and issubclass(my_type, TypeEngine):
                    pass
                elif isinstance(my_type, TypeEngine):
                    pass
                else:
                    raise ValueError(f"The type of {col} is not a SQLAlchemy type")

        table = SQLTable(
            name,
            self,
            frame=frame,
            index=index,
            if_exists=if_exists,
            index_label=index_label,
            schema=schema,
            dtype=dtype,
        )
        table.create()
        return table
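
    # Usage sketch for ``prep_table`` (illustrative): ``dtype`` entries must
    # be SQLAlchemy types, either instances or classes, e.g.
    #
    #     from sqlalchemy.types import Text
    #
    #     table = db.prep_table(frame, "my_table", dtype={"a": Text})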
    def check_case_sensitive(
        self,
        name: str,
        schema: str | None,
    ) -> None:
        """
        Checks table name for issues with case-sensitivity.

        Method is called after data is inserted.
        """
        if not name.isdigit() and not name.islower():
            # check for potential case-sensitivity issues (GH7815)
            # only check when name is not a number and not lower case
            from sqlalchemy import inspect as sqlalchemy_inspect

            insp = sqlalchemy_inspect(self.con)
            table_names = insp.get_table_names(schema=schema or self.meta.schema)
            if name not in table_names:
                msg = (
                    f"The provided table name '{name}' is not found exactly as "
                    "such in the database after writing the table, possibly "
                    "due to case sensitivity issues. Consider using lower "
                    "case table names."
                )
                warnings.warn(
                    msg,
                    UserWarning,
                    stacklevel=find_stack_level(),
                )
    def to_sql(
        self,
        frame,
        name: str,
        if_exists: Literal["fail", "replace", "append"] = "fail",
        index: bool = True,
        index_label=None,
        schema: str | None = None,
        chunksize=None,
        dtype: DtypeArg | None = None,
        method=None,
        engine: str = "auto",
        **engine_kwargs,
    ) -> int | None:
        """
        Write records stored in a DataFrame to a SQL database.

        Parameters
        ----------
        frame : DataFrame
        name : string
            Name of SQL table.
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, raise a ValueError.
            - replace: If table exists, drop it, recreate it, and insert data.
            - append: If table exists, insert data. Create if does not exist.
        index : boolean, default True
            Write DataFrame index as a column.
        index_label : string or sequence, default None
            Column label for index column(s). If None is given (default) and
            `index` is True, then the index names are used.
            A sequence should be given if the DataFrame uses MultiIndex.
        schema : string, default None
            Name of SQL schema in database to write to (if database flavor
            supports this). If specified, this overwrites the default
            schema of the SQLDatabase object.
        chunksize : int, default None
            If not None, then rows will be written in batches of this size at a
            time. If None, all rows will be written at once.
        dtype : single type or dict of column name to SQL type, default None
            Optionally specify the datatype for columns. The SQL type should
            be a SQLAlchemy type. If all columns are of the same type, one
            single value can be used.
        method : {None, 'multi', callable}, default None
            Controls the SQL insertion clause used:

            * None : Uses standard SQL ``INSERT`` clause (one per row).
            * 'multi': Pass multiple values in a single ``INSERT`` clause.
            * callable with signature ``(pd_table, conn, keys, data_iter)``.

            Details and a sample callable implementation can be found in the
            section :ref:`insert method <io.sql.method>`.
        engine : {'auto', 'sqlalchemy'}, default 'auto'
            SQL engine library to use. If 'auto', then the option
            ``io.sql.engine`` is used. The default ``io.sql.engine``
            behavior is 'sqlalchemy'.

            .. versionadded:: 1.3.0

        **engine_kwargs
            Any additional kwargs are passed to the engine.
        """
        sql_engine = get_engine(engine)

        table = self.prep_table(
            frame=frame,
            name=name,
            if_exists=if_exists,
            index=index,
            index_label=index_label,
            schema=schema,
            dtype=dtype,
        )

        total_inserted = sql_engine.insert_records(
            table=table,
            con=self.con,
            frame=frame,
            name=name,
            index=index,
            schema=schema,
            chunksize=chunksize,
            method=method,
            **engine_kwargs,
        )

        self.check_case_sensitive(name=name, schema=schema)
        return total_inserted
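
    # Usage sketch for ``to_sql`` (illustrative; ``frame`` is assumed to be
    # a DataFrame and ``db`` an open SQLDatabase):
    #
    #     rows_inserted = db.to_sql(
    #         frame, "my_table", if_exists="append", index=False, chunksize=1000
    #     )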
    @property
    def tables(self):
        return self.meta.tables

    def has_table(self, name: str, schema: str | None = None) -> bool:
        from sqlalchemy import inspect as sqlalchemy_inspect

        insp = sqlalchemy_inspect(self.con)
        return insp.has_table(name, schema or self.meta.schema)

    def get_table(self, table_name: str, schema: str | None = None) -> Table:
        from sqlalchemy import (
            Numeric,
            Table,
        )

        schema = schema or self.meta.schema
        tbl = Table(table_name, self.meta, autoload_with=self.con, schema=schema)
        for column in tbl.columns:
            if isinstance(column.type, Numeric):
                column.type.asdecimal = False
        return tbl

    def drop_table(self, table_name: str, schema: str | None = None) -> None:
        schema = schema or self.meta.schema
        if self.has_table(table_name, schema):
            self.meta.reflect(bind=self.con, only=[table_name], schema=schema)
            with self.run_transaction():
                self.get_table(table_name, schema).drop(bind=self.con)
            self.meta.clear()

    def _create_sql_schema(
        self,
        frame: DataFrame,
        table_name: str,
        keys: list[str] | None = None,
        dtype: DtypeArg | None = None,
        schema: str | None = None,
    ):
        table = SQLTable(
            table_name,
            self,
            frame=frame,
            index=False,
            keys=keys,
            dtype=dtype,
            schema=schema,
        )
        return str(table.sql_schema())


# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes
_SQL_TYPES = {
    "string": "TEXT",
    "floating": "REAL",
    "integer": "INTEGER",
    "datetime": "TIMESTAMP",
    "date": "DATE",
    "time": "TIME",
    "boolean": "INTEGER",
}


def _get_unicode_name(name):
    try:
        uname = str(name).encode("utf-8", "strict").decode("utf-8")
    except UnicodeError as err:
        raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err
    return uname


def _get_valid_sqlite_name(name):
    # See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
    # -for-sqlite-table-column-names-in-python
    # Ensure the string can be encoded as UTF-8.
    # Ensure the string does not include any NUL characters.
    # Replace all " with "".
    # Wrap the entire thing in double quotes.

    uname = _get_unicode_name(name)
    if not len(uname):
        raise ValueError("Empty table or column name specified")

    nul_index = uname.find("\x00")
    if nul_index >= 0:
        raise ValueError("SQLite identifier cannot contain NULs")
    return '"' + uname.replace('"', '""') + '"'


class SQLiteTable(SQLTable):
    """
    Patch the SQLTable for fallback support.
    Instead of a table variable just use the Create Table statement.
    """

    def __init__(self, *args, **kwargs) -> None:
        # GH 8341
        # register an adapter callable for datetime.time object
        import sqlite3

        # this will transform time(12,34,56,789) into '12:34:56.000789'
        # (this is what sqlalchemy does)
        def _adapt_time(t) -> str:
            # This is faster than strftime
            return f"{t.hour:02d}:{t.minute:02d}:{t.second:02d}.{t.microsecond:06d}"

        sqlite3.register_adapter(time, _adapt_time)
        super().__init__(*args, **kwargs)

    def sql_schema(self) -> str:
        return str(";\n".join(self.table))

    def _execute_create(self) -> None:
        with self.pd_sql.run_transaction() as conn:
            for stmt in self.table:
                conn.execute(stmt)

    def insert_statement(self, *, num_rows: int) -> str:
        names = list(map(str, self.frame.columns))
        wld = "?"  # wildcard char
        escape = _get_valid_sqlite_name

        if self.index is not None:
            for idx in self.index[::-1]:
                names.insert(0, idx)

        bracketed_names = [escape(column) for column in names]
        col_names = ",".join(bracketed_names)

        row_wildcards = ",".join([wld] * len(names))
        wildcards = ",".join([f"({row_wildcards})" for _ in range(num_rows)])
        insert_statement = (
            f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}"
        )
        return insert_statement
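
    # For a frame with columns ["a", "b"], no index, and ``num_rows=2``, the
    # statement above expands to (illustrative):
    #
    #     INSERT INTO "tbl" ("a","b") VALUES (?,?),(?,?)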
    def _execute_insert(self, conn, keys, data_iter) -> int:
        data_list = list(data_iter)
        conn.executemany(self.insert_statement(num_rows=1), data_list)
        return conn.rowcount

    def _execute_insert_multi(self, conn, keys, data_iter) -> int:
        data_list = list(data_iter)
        flattened_data = [x for row in data_list for x in row]
        conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data)
        return conn.rowcount
    def _create_table_setup(self):
        """
        Return a list of SQL statements that creates a table reflecting the
        structure of a DataFrame. The first entry will be a CREATE TABLE
        statement while the rest will be CREATE INDEX statements.
        """
        column_names_and_types = self._get_column_names_and_types(self._sql_type_name)
        escape = _get_valid_sqlite_name

        create_tbl_stmts = [
            escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types
        ]

        if self.keys is not None and len(self.keys):
            if not is_list_like(self.keys):
                keys = [self.keys]
            else:
                keys = self.keys
            cnames_br = ", ".join([escape(c) for c in keys])
            create_tbl_stmts.append(
                f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})"
            )
        if self.schema:
            schema_name = self.schema + "."
        else:
            schema_name = ""
        create_stmts = [
            "CREATE TABLE "
            + schema_name
            + escape(self.name)
            + " (\n"
            + ",\n  ".join(create_tbl_stmts)
            + "\n)"
        ]

        ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index]
        if len(ix_cols):
            cnames = "_".join(ix_cols)
            cnames_br = ",".join([escape(c) for c in ix_cols])
            create_stmts.append(
                "CREATE INDEX "
                + escape("ix_" + self.name + "_" + cnames)
                + " ON "  # leading space separates the keyword from the index name
                + escape(self.name)
                + " ("
                + cnames_br
                + ")"
            )

        return create_stmts
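
    # For a frame with a float column "a" written together with its unnamed
    # index, the statements above come out roughly as (illustrative):
    #
    #     CREATE TABLE "df" (
    #       "index" INTEGER,
    #       "a" REAL
    #     )
    #     CREATE INDEX "ix_df_index" ON "df" ("index")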
    def _sql_type_name(self, col):
        dtype: DtypeArg = self.dtype or {}
        if is_dict_like(dtype):
            dtype = cast(dict, dtype)
            if col.name in dtype:
                return dtype[col.name]

        # Infer type of column, while ignoring missing values.
        # Needed for inserting typed data containing NULLs, GH 8778.
        col_type = lib.infer_dtype(col, skipna=True)

        if col_type == "timedelta64":
            warnings.warn(
                "the 'timedelta' type is not supported, and will be "
                "written as integer values (ns frequency) to the database.",
                UserWarning,
                stacklevel=find_stack_level(),
            )
            col_type = "integer"

        elif col_type == "datetime64":
            col_type = "datetime"

        elif col_type == "empty":
            col_type = "string"

        elif col_type == "complex":
            raise ValueError("Complex datatypes not supported")

        if col_type not in _SQL_TYPES:
            col_type = "string"

        return _SQL_TYPES[col_type]
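
    # Mapping examples for ``_sql_type_name`` (illustrative): float64 ->
    # "floating" -> REAL; datetime64 -> "datetime" -> TIMESTAMP; bool ->
    # "boolean" -> INTEGER; anything unrecognised falls back to TEXT.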


class SQLiteDatabase(PandasSQL):
    """
    Version of SQLDatabase to support SQLite connections (fallback without
    SQLAlchemy). This should only be used internally.

    Parameters
    ----------
    con : sqlite connection object
    """

    def __init__(self, con) -> None:
        self.con = con

    @contextmanager
    def run_transaction(self):
        cur = self.con.cursor()
        try:
            yield cur
            self.con.commit()
        except Exception:
            self.con.rollback()
            raise
        finally:
            cur.close()
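
    # Usage sketch with a plain DBAPI connection (illustrative):
    #
    #     import sqlite3
    #
    #     db = SQLiteDatabase(sqlite3.connect(":memory:"))
    #     with db.run_transaction() as cur:
    #         cur.execute("CREATE TABLE t (a INTEGER)")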
    def execute(self, sql: str | Select | TextClause, params=None):
        if not isinstance(sql, str):
            raise TypeError("Query must be a string unless using sqlalchemy.")
        args = [] if params is None else [params]
        cur = self.con.cursor()
        try:
            cur.execute(sql, *args)
            return cur
        except Exception as exc:
            try:
                self.con.rollback()
            except Exception as inner_exc:  # pragma: no cover
                ex = DatabaseError(
                    f"Execution failed on sql: {sql}\n{exc}\nunable to rollback"
                )
                raise ex from inner_exc

            ex = DatabaseError(f"Execution failed on sql '{sql}': {exc}")
            raise ex from exc
    @staticmethod
    def _query_iterator(
        cursor,
        chunksize: int,
        columns,
        index_col=None,
        coerce_float: bool = True,
        parse_dates=None,
        dtype: DtypeArg | None = None,
        dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
    ):
        """Return generator through chunked result set"""
        has_read_data = False
        while True:
            data = cursor.fetchmany(chunksize)
            if isinstance(data, tuple):
                data = list(data)
            if not data:
                cursor.close()
                if not has_read_data:
                    result = DataFrame.from_records(
                        [], columns=columns, coerce_float=coerce_float
                    )
                    if dtype:
                        result = result.astype(dtype)
                    yield result
                break

            has_read_data = True
            yield _wrap_result(
                data,
                columns,
                index_col=index_col,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
                dtype=dtype,
                dtype_backend=dtype_backend,
            )
    def read_query(
        self,
        sql,
        index_col=None,
        coerce_float: bool = True,
        parse_dates=None,
        params=None,
        chunksize: int | None = None,
        dtype: DtypeArg | None = None,
        dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
    ) -> DataFrame | Iterator[DataFrame]:
        cursor = self.execute(sql, params)
        columns = [col_desc[0] for col_desc in cursor.description]

        if chunksize is not None:
            return self._query_iterator(
                cursor,
                chunksize,
                columns,
                index_col=index_col,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
                dtype=dtype,
                dtype_backend=dtype_backend,
            )
        else:
            data = self._fetchall_as_list(cursor)
            cursor.close()

            frame = _wrap_result(
                data,
                columns,
                index_col=index_col,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
                dtype=dtype,
                dtype_backend=dtype_backend,
            )
            return frame

    def _fetchall_as_list(self, cur):
        result = cur.fetchall()
        if not isinstance(result, list):
            result = list(result)
        return result
    def to_sql(
        self,
        frame,
        name,
        if_exists: str = "fail",
        index: bool = True,
        index_label=None,
        schema=None,
        chunksize=None,
        dtype: DtypeArg | None = None,
        method=None,
        engine: str = "auto",
        **engine_kwargs,
    ) -> int | None:
        """
        Write records stored in a DataFrame to a SQL database.

        Parameters
        ----------
        frame : DataFrame
        name : string
            Name of SQL table.
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            fail: If table exists, raise a ValueError.
            replace: If table exists, drop it, recreate it, and insert data.
            append: If table exists, insert data. Create if it does not exist.
        index : bool, default True
            Write DataFrame index as a column.
        index_label : string or sequence, default None
            Column label for index column(s). If None is given (default) and
            `index` is True, then the index names are used.
            A sequence should be given if the DataFrame uses MultiIndex.
        schema : string, default None
            Ignored parameter included for compatibility with SQLAlchemy
            version of ``to_sql``.
        chunksize : int, default None
            If not None, then rows will be written in batches of this
            size at a time. If None, all rows will be written at once.
        dtype : single type or dict of column name to SQL type, default None
            Optionally specify the datatype for columns. The SQL type should
            be a string. If all columns are of the same type, one single value
            can be used.
        method : {None, 'multi', callable}, default None
            Controls the SQL insertion clause used:

            * None : Uses standard SQL ``INSERT`` clause (one per row).
            * 'multi': Pass multiple values in a single ``INSERT`` clause.
            * callable with signature ``(pd_table, conn, keys, data_iter)``.

            Details and a sample callable implementation can be found in the
            section :ref:`insert method <io.sql.method>`.
        """
        if dtype:
            if not is_dict_like(dtype):
                # error: Value expression in dictionary comprehension has incompatible
                # type "Union[ExtensionDtype, str, dtype[Any], Type[object],
                # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
                # Type[str], Type[float], Type[int], Type[complex], Type[bool],
                # Type[object]]]]"; expected type "Union[ExtensionDtype, str,
                # dtype[Any], Type[object]]"
                dtype = {col_name: dtype for col_name in frame}  # type: ignore[misc]
            else:
                dtype = cast(dict, dtype)

            for col, my_type in dtype.items():
                if not isinstance(my_type, str):
                    raise ValueError(f"{col} ({my_type}) not a string")

        table = SQLiteTable(
            name,
            self,
            frame=frame,
            index=index,
            if_exists=if_exists,
            index_label=index_label,
            dtype=dtype,
        )
        table.create()
        return table.insert(chunksize, method)
    def has_table(self, name: str, schema: str | None = None) -> bool:
        wld = "?"
        query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"

        return len(self.execute(query, [name]).fetchall()) > 0

    def get_table(self, table_name: str, schema: str | None = None) -> None:
        return None  # not supported in fallback mode

    def drop_table(self, name: str, schema: str | None = None) -> None:
        drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
        self.execute(drop_sql)

    def _create_sql_schema(
        self,
        frame,
        table_name: str,
        keys=None,
        dtype: DtypeArg | None = None,
        schema: str | None = None,
    ):
        table = SQLiteTable(
            table_name,
            self,
            frame=frame,
            index=False,
            keys=keys,
            dtype=dtype,
            schema=schema,
        )
        return str(table.sql_schema())


def get_schema(
    frame,
    name: str,
    keys=None,
    con=None,
    dtype: DtypeArg | None = None,
    schema: str | None = None,
) -> str:
    """
    Get the SQL db table schema for the given frame.

    Parameters
    ----------
    frame : DataFrame
    name : str
        name of SQL table
    keys : string or sequence, default: None
        columns to use as a primary key
    con : an open SQL database connection object or a SQLAlchemy connectable
        Using SQLAlchemy makes it possible to use any DB supported by that
        library; default: None.
        If a DBAPI2 object, only sqlite3 is supported.
    dtype : dict of column name to SQL type, default None
        Optionally specify the datatype for columns. The SQL type should
        be a SQLAlchemy type, or a string for a sqlite3 fallback connection.
    schema : str, default None
        Optionally specify the schema to be used in creating the table.

        .. versionadded:: 1.2.0
    """
    with pandasSQL_builder(con=con) as pandas_sql:
        return pandas_sql._create_sql_schema(
            frame, name, keys=keys, dtype=dtype, schema=schema
        )
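
# Usage sketch for ``get_schema`` (illustrative; the exact whitespace of the
# returned DDL may differ):
#
#     import sqlite3
#
#     import pandas as pd
#
#     frame = pd.DataFrame({"a": [1.0]})
#     ddl = get_schema(frame, "my_table", con=sqlite3.connect(":memory:"))
#     # roughly: CREATE TABLE "my_table" ( "a" REAL )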