_json.py

from __future__ import annotations

from abc import (
    ABC,
    abstractmethod,
)
from collections import abc
from io import StringIO
from itertools import islice
from types import TracebackType
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Generic,
    Literal,
    Mapping,
    TypeVar,
    overload,
)

import numpy as np

from pandas._libs import lib
from pandas._libs.json import (
    dumps,
    loads,
)
from pandas._libs.tslibs import iNaT
from pandas._typing import (
    CompressionOptions,
    DtypeArg,
    DtypeBackend,
    FilePath,
    IndexLabel,
    JSONEngine,
    JSONSerializable,
    ReadBuffer,
    StorageOptions,
    WriteBuffer,
)
from pandas.compat._optional import import_optional_dependency
from pandas.errors import AbstractMethodError
from pandas.util._decorators import doc
from pandas.util._validators import check_dtype_backend

from pandas.core.dtypes.common import (
    ensure_str,
    is_period_dtype,
)
from pandas.core.dtypes.generic import ABCIndex

from pandas import (
    ArrowDtype,
    DataFrame,
    MultiIndex,
    Series,
    isna,
    notna,
    to_datetime,
)
from pandas.core.reshape.concat import concat
from pandas.core.shared_docs import _shared_docs

from pandas.io.common import (
    IOHandles,
    dedup_names,
    extension_to_compression,
    file_exists,
    get_handle,
    is_fsspec_url,
    is_potential_multi_index,
    is_url,
    stringify_path,
)
from pandas.io.json._normalize import convert_to_line_delimits
from pandas.io.json._table_schema import (
    build_table_schema,
    parse_table_schema,
)
from pandas.io.parsers.readers import validate_integer

if TYPE_CHECKING:
    from pandas.core.generic import NDFrame

FrameSeriesStrT = TypeVar("FrameSeriesStrT", bound=Literal["frame", "series"])


# interface to/from
@overload
def to_json(
    path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes],
    obj: NDFrame,
    orient: str | None = ...,
    date_format: str = ...,
    double_precision: int = ...,
    force_ascii: bool = ...,
    date_unit: str = ...,
    default_handler: Callable[[Any], JSONSerializable] | None = ...,
    lines: bool = ...,
    compression: CompressionOptions = ...,
    index: bool = ...,
    indent: int = ...,
    storage_options: StorageOptions = ...,
    mode: Literal["a", "w"] = ...,
) -> None:
    ...


@overload
def to_json(
    path_or_buf: None,
    obj: NDFrame,
    orient: str | None = ...,
    date_format: str = ...,
    double_precision: int = ...,
    force_ascii: bool = ...,
    date_unit: str = ...,
    default_handler: Callable[[Any], JSONSerializable] | None = ...,
    lines: bool = ...,
    compression: CompressionOptions = ...,
    index: bool = ...,
    indent: int = ...,
    storage_options: StorageOptions = ...,
    mode: Literal["a", "w"] = ...,
) -> str:
    ...


def to_json(
    path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] | None,
    obj: NDFrame,
    orient: str | None = None,
    date_format: str = "epoch",
    double_precision: int = 10,
    force_ascii: bool = True,
    date_unit: str = "ms",
    default_handler: Callable[[Any], JSONSerializable] | None = None,
    lines: bool = False,
    compression: CompressionOptions = "infer",
    index: bool = True,
    indent: int = 0,
    storage_options: StorageOptions = None,
    mode: Literal["a", "w"] = "w",
) -> str | None:
    if not index and orient not in ["split", "table"]:
        raise ValueError(
            "'index=False' is only valid when 'orient' is 'split' or 'table'"
        )

    if lines and orient != "records":
        raise ValueError("'lines' keyword only valid when 'orient' is records")
    if mode not in ["a", "w"]:
        msg = (
            f"mode={mode} is not a valid option. "
            "Only 'w' and 'a' are currently supported."
        )
        raise ValueError(msg)

    if mode == "a" and (not lines or orient != "records"):
        msg = (
            "mode='a' (append) is only supported when "
            "lines is True and orient is 'records'"
        )
        raise ValueError(msg)
    if orient == "table" and isinstance(obj, Series):
        obj = obj.to_frame(name=obj.name or "values")

    writer: type[Writer]
    if orient == "table" and isinstance(obj, DataFrame):
        writer = JSONTableWriter
    elif isinstance(obj, Series):
        writer = SeriesWriter
    elif isinstance(obj, DataFrame):
        writer = FrameWriter
    else:
        raise NotImplementedError("'obj' should be a Series or a DataFrame")

    s = writer(
        obj,
        orient=orient,
        date_format=date_format,
        double_precision=double_precision,
        ensure_ascii=force_ascii,
        date_unit=date_unit,
        default_handler=default_handler,
        index=index,
        indent=indent,
    ).write()

    if lines:
        s = convert_to_line_delimits(s)

    if path_or_buf is not None:
        # apply compression and byte/text conversion
        with get_handle(
            path_or_buf, mode, compression=compression, storage_options=storage_options
        ) as handles:
            handles.handle.write(s)
    else:
        return s
    return None
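
# A minimal usage sketch of the writer dispatch above (illustrative only, not
# part of the module; ``df`` is a hypothetical two-row DataFrame):
#
# >>> import pandas as pd
# >>> df = pd.DataFrame({"a": [1, 2]})
# >>> to_json(None, df, orient="records", lines=True)
# '{"a":1}\n{"a":2}'
#
# With ``path_or_buf=None`` the serialized string is returned; otherwise it is
# written through ``get_handle`` (with optional compression) and None returned.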


class Writer(ABC):
    _default_orient: str

    def __init__(
        self,
        obj: NDFrame,
        orient: str | None,
        date_format: str,
        double_precision: int,
        ensure_ascii: bool,
        date_unit: str,
        index: bool,
        default_handler: Callable[[Any], JSONSerializable] | None = None,
        indent: int = 0,
    ) -> None:
        self.obj = obj

        if orient is None:
            orient = self._default_orient

        self.orient = orient
        self.date_format = date_format
        self.double_precision = double_precision
        self.ensure_ascii = ensure_ascii
        self.date_unit = date_unit
        self.default_handler = default_handler
        self.index = index
        self.indent = indent

        self.is_copy = None
        self._format_axes()

    def _format_axes(self):
        raise AbstractMethodError(self)

    def write(self) -> str:
        iso_dates = self.date_format == "iso"
        return dumps(
            self.obj_to_write,
            orient=self.orient,
            double_precision=self.double_precision,
            ensure_ascii=self.ensure_ascii,
            date_unit=self.date_unit,
            iso_dates=iso_dates,
            default_handler=self.default_handler,
            indent=self.indent,
        )

    @property
    @abstractmethod
    def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
        """Object to write in JSON format."""


class SeriesWriter(Writer):
    _default_orient = "index"

    @property
    def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
        if not self.index and self.orient == "split":
            return {"name": self.obj.name, "data": self.obj.values}
        else:
            return self.obj

    def _format_axes(self):
        if not self.obj.index.is_unique and self.orient == "index":
            raise ValueError(f"Series index must be unique for orient='{self.orient}'")


class FrameWriter(Writer):
    _default_orient = "columns"

    @property
    def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
        if not self.index and self.orient == "split":
            obj_to_write = self.obj.to_dict(orient="split")
            del obj_to_write["index"]
        else:
            obj_to_write = self.obj
        return obj_to_write

    def _format_axes(self):
        """
        Try to format axes if they are datelike.
        """
        if not self.obj.index.is_unique and self.orient in ("index", "columns"):
            raise ValueError(
                f"DataFrame index must be unique for orient='{self.orient}'."
            )
        if not self.obj.columns.is_unique and self.orient in (
            "index",
            "columns",
            "records",
        ):
            raise ValueError(
                f"DataFrame columns must be unique for orient='{self.orient}'."
            )


class JSONTableWriter(FrameWriter):
    _default_orient = "records"

    def __init__(
        self,
        obj,
        orient: str | None,
        date_format: str,
        double_precision: int,
        ensure_ascii: bool,
        date_unit: str,
        index: bool,
        default_handler: Callable[[Any], JSONSerializable] | None = None,
        indent: int = 0,
    ) -> None:
  279. """
  280. Adds a `schema` attribute with the Table Schema, resets
  281. the index (can't do in caller, because the schema inference needs
  282. to know what the index is, forces orient to records, and forces
  283. date_format to 'iso'.
  284. """
        super().__init__(
            obj,
            orient,
            date_format,
            double_precision,
            ensure_ascii,
            date_unit,
            index,
            default_handler=default_handler,
            indent=indent,
        )

        if date_format != "iso":
            msg = (
                "Trying to write with `orient='table'` and "
                f"`date_format='{date_format}'`. Table Schema requires dates "
                "to be formatted with `date_format='iso'`"
            )
            raise ValueError(msg)

        self.schema = build_table_schema(obj, index=self.index)

        # NotImplemented on a column MultiIndex
        if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):
            raise NotImplementedError(
                "orient='table' is not supported for MultiIndex columns"
            )

        # TODO: Do this timedelta properly in objToJSON.c See GH #15137
        if (
            (obj.ndim == 1)
            and (obj.name in set(obj.index.names))
            or len(obj.columns.intersection(obj.index.names))
        ):
            msg = "Overlapping names between the index and columns"
            raise ValueError(msg)

        obj = obj.copy()
        timedeltas = obj.select_dtypes(include=["timedelta"]).columns
        if len(timedeltas):
            obj[timedeltas] = obj[timedeltas].applymap(lambda x: x.isoformat())
        # Convert PeriodIndex to datetimes before serializing
        if is_period_dtype(obj.index.dtype):
            obj.index = obj.index.to_timestamp()

        # exclude index from obj if index=False
        if not self.index:
            self.obj = obj.reset_index(drop=True)
        else:
            self.obj = obj.reset_index(drop=False)
        self.date_format = "iso"
        self.orient = "records"
        self.index = index

    @property
    def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
        return {"schema": self.schema, "data": self.obj}


@overload
def read_json(
    path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],
    *,
    orient: str | None = ...,
    typ: Literal["frame"] = ...,
    dtype: DtypeArg | None = ...,
    convert_axes=...,
    convert_dates: bool | list[str] = ...,
    keep_default_dates: bool = ...,
    precise_float: bool = ...,
    date_unit: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    lines: bool = ...,
    chunksize: int,
    compression: CompressionOptions = ...,
    nrows: int | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
    engine: JSONEngine = ...,
) -> JsonReader[Literal["frame"]]:
    ...


@overload
def read_json(
    path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],
    *,
    orient: str | None = ...,
    typ: Literal["series"],
    dtype: DtypeArg | None = ...,
    convert_axes=...,
    convert_dates: bool | list[str] = ...,
    keep_default_dates: bool = ...,
    precise_float: bool = ...,
    date_unit: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    lines: bool = ...,
    chunksize: int,
    compression: CompressionOptions = ...,
    nrows: int | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
    engine: JSONEngine = ...,
) -> JsonReader[Literal["series"]]:
    ...


@overload
def read_json(
    path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],
    *,
    orient: str | None = ...,
    typ: Literal["series"],
    dtype: DtypeArg | None = ...,
    convert_axes=...,
    convert_dates: bool | list[str] = ...,
    keep_default_dates: bool = ...,
    precise_float: bool = ...,
    date_unit: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    lines: bool = ...,
    chunksize: None = ...,
    compression: CompressionOptions = ...,
    nrows: int | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
    engine: JSONEngine = ...,
) -> Series:
    ...


@overload
def read_json(
    path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],
    *,
    orient: str | None = ...,
    typ: Literal["frame"] = ...,
    dtype: DtypeArg | None = ...,
    convert_axes=...,
    convert_dates: bool | list[str] = ...,
    keep_default_dates: bool = ...,
    precise_float: bool = ...,
    date_unit: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    lines: bool = ...,
    chunksize: None = ...,
    compression: CompressionOptions = ...,
    nrows: int | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
    engine: JSONEngine = ...,
) -> DataFrame:
    ...


@doc(
    storage_options=_shared_docs["storage_options"],
    decompression_options=_shared_docs["decompression_options"] % "path_or_buf",
)
def read_json(
    path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],
    *,
    orient: str | None = None,
    typ: Literal["frame", "series"] = "frame",
    dtype: DtypeArg | None = None,
    convert_axes=None,
    convert_dates: bool | list[str] = True,
    keep_default_dates: bool = True,
    precise_float: bool = False,
    date_unit: str | None = None,
    encoding: str | None = None,
    encoding_errors: str | None = "strict",
    lines: bool = False,
    chunksize: int | None = None,
    compression: CompressionOptions = "infer",
    nrows: int | None = None,
    storage_options: StorageOptions = None,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
    engine: JSONEngine = "ujson",
) -> DataFrame | Series | JsonReader:
  452. """
  453. Convert a JSON string to pandas object.
  454. Parameters
  455. ----------
  456. path_or_buf : a valid JSON str, path object or file-like object
  457. Any valid string path is acceptable. The string could be a URL. Valid
  458. URL schemes include http, ftp, s3, and file. For file URLs, a host is
  459. expected. A local file could be:
  460. ``file://localhost/path/to/table.json``.
  461. If you want to pass in a path object, pandas accepts any
  462. ``os.PathLike``.
  463. By file-like object, we refer to objects with a ``read()`` method,
  464. such as a file handle (e.g. via builtin ``open`` function)
  465. or ``StringIO``.
  466. orient : str, optional
  467. Indication of expected JSON string format.
  468. Compatible JSON strings can be produced by ``to_json()`` with a
  469. corresponding orient value.
  470. The set of possible orients is:
  471. - ``'split'`` : dict like
  472. ``{{index -> [index], columns -> [columns], data -> [values]}}``
  473. - ``'records'`` : list like
  474. ``[{{column -> value}}, ... , {{column -> value}}]``
  475. - ``'index'`` : dict like ``{{index -> {{column -> value}}}}``
  476. - ``'columns'`` : dict like ``{{column -> {{index -> value}}}}``
  477. - ``'values'`` : just the values array
  478. The allowed and default values depend on the value
  479. of the `typ` parameter.
  480. * when ``typ == 'series'``,
  481. - allowed orients are ``{{'split','records','index'}}``
  482. - default is ``'index'``
  483. - The Series index must be unique for orient ``'index'``.
  484. * when ``typ == 'frame'``,
  485. - allowed orients are ``{{'split','records','index',
  486. 'columns','values', 'table'}}``
  487. - default is ``'columns'``
  488. - The DataFrame index must be unique for orients ``'index'`` and
  489. ``'columns'``.
  490. - The DataFrame columns must be unique for orients ``'index'``,
  491. ``'columns'``, and ``'records'``.
  492. typ : {{'frame', 'series'}}, default 'frame'
  493. The type of object to recover.
  494. dtype : bool or dict, default None
  495. If True, infer dtypes; if a dict of column to dtype, then use those;
  496. if False, then don't infer dtypes at all, applies only to the data.
  497. For all ``orient`` values except ``'table'``, default is True.
  498. convert_axes : bool, default None
  499. Try to convert the axes to the proper dtypes.
  500. For all ``orient`` values except ``'table'``, default is True.
  501. convert_dates : bool or list of str, default True
  502. If True then default datelike columns may be converted (depending on
  503. keep_default_dates).
  504. If False, no dates will be converted.
  505. If a list of column names, then those columns will be converted and
  506. default datelike columns may also be converted (depending on
  507. keep_default_dates).
  508. keep_default_dates : bool, default True
  509. If parsing dates (convert_dates is not False), then try to parse the
  510. default datelike columns.
  511. A column label is datelike if
  512. * it ends with ``'_at'``,
  513. * it ends with ``'_time'``,
  514. * it begins with ``'timestamp'``,
  515. * it is ``'modified'``, or
  516. * it is ``'date'``.
  517. precise_float : bool, default False
  518. Set to enable usage of higher precision (strtod) function when
  519. decoding string to double values. Default (False) is to use fast but
  520. less precise builtin functionality.
  521. date_unit : str, default None
  522. The timestamp unit to detect if converting dates. The default behaviour
  523. is to try and detect the correct precision, but if this is not desired
  524. then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
  525. milliseconds, microseconds or nanoseconds respectively.
  526. encoding : str, default is 'utf-8'
  527. The encoding to use to decode py3 bytes.
  528. encoding_errors : str, optional, default "strict"
  529. How encoding errors are treated. `List of possible values
  530. <https://docs.python.org/3/library/codecs.html#error-handlers>`_ .
  531. .. versionadded:: 1.3.0
  532. lines : bool, default False
  533. Read the file as a json object per line.
  534. chunksize : int, optional
  535. Return JsonReader object for iteration.
  536. See the `line-delimited json docs
  537. <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#line-delimited-json>`_
  538. for more information on ``chunksize``.
  539. This can only be passed if `lines=True`.
  540. If this is None, the file will be read into memory all at once.
  541. .. versionchanged:: 1.2
  542. ``JsonReader`` is a context manager.
  543. {decompression_options}
  544. .. versionchanged:: 1.4.0 Zstandard support.
  545. nrows : int, optional
  546. The number of lines from the line-delimited jsonfile that has to be read.
  547. This can only be passed if `lines=True`.
  548. If this is None, all the rows will be returned.
  549. .. versionadded:: 1.1
  550. {storage_options}
  551. .. versionadded:: 1.2.0
  552. dtype_backend : {{"numpy_nullable", "pyarrow"}}, defaults to NumPy backed DataFrames
  553. Which dtype_backend to use, e.g. whether a DataFrame should have NumPy
  554. arrays, nullable dtypes are used for all dtypes that have a nullable
  555. implementation when "numpy_nullable" is set, pyarrow is used for all
  556. dtypes if "pyarrow" is set.
  557. The dtype_backends are still experimential.
  558. .. versionadded:: 2.0
  559. engine : {{"ujson", "pyarrow"}}, default "ujson"
  560. Parser engine to use. The ``"pyarrow"`` engine is only available when
  561. ``lines=True``.
  562. .. versionadded:: 2.0
  563. Returns
  564. -------
  565. Series or DataFrame
  566. The type returned depends on the value of `typ`.
  567. See Also
  568. --------
  569. DataFrame.to_json : Convert a DataFrame to a JSON string.
  570. Series.to_json : Convert a Series to a JSON string.
  571. json_normalize : Normalize semi-structured JSON data into a flat table.
  572. Notes
  573. -----
  574. Specific to ``orient='table'``, if a :class:`DataFrame` with a literal
  575. :class:`Index` name of `index` gets written with :func:`to_json`, the
  576. subsequent read operation will incorrectly set the :class:`Index` name to
  577. ``None``. This is because `index` is also used by :func:`DataFrame.to_json`
  578. to denote a missing :class:`Index` name, and the subsequent
  579. :func:`read_json` operation cannot distinguish between the two. The same
  580. limitation is encountered with a :class:`MultiIndex` and any names
  581. beginning with ``'level_'``.
  582. Examples
  583. --------
  584. >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
  585. ... index=['row 1', 'row 2'],
  586. ... columns=['col 1', 'col 2'])
  587. Encoding/decoding a Dataframe using ``'split'`` formatted JSON:
  588. >>> df.to_json(orient='split')
  589. '\
  590. {{\
  591. "columns":["col 1","col 2"],\
  592. "index":["row 1","row 2"],\
  593. "data":[["a","b"],["c","d"]]\
  594. }}\
  595. '
  596. >>> pd.read_json(_, orient='split')
  597. col 1 col 2
  598. row 1 a b
  599. row 2 c d
  600. Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
  601. >>> df.to_json(orient='index')
  602. '{{"row 1":{{"col 1":"a","col 2":"b"}},"row 2":{{"col 1":"c","col 2":"d"}}}}'
  603. >>> pd.read_json(_, orient='index')
  604. col 1 col 2
  605. row 1 a b
  606. row 2 c d
  607. Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
  608. Note that index labels are not preserved with this encoding.
  609. >>> df.to_json(orient='records')
  610. '[{{"col 1":"a","col 2":"b"}},{{"col 1":"c","col 2":"d"}}]'
  611. >>> pd.read_json(_, orient='records')
  612. col 1 col 2
  613. 0 a b
  614. 1 c d
  615. Encoding with Table Schema
  616. >>> df.to_json(orient='table')
  617. '\
  618. {{"schema":{{"fields":[\
  619. {{"name":"index","type":"string"}},\
  620. {{"name":"col 1","type":"string"}},\
  621. {{"name":"col 2","type":"string"}}],\
  622. "primaryKey":["index"],\
  623. "pandas_version":"1.4.0"}},\
  624. "data":[\
  625. {{"index":"row 1","col 1":"a","col 2":"b"}},\
  626. {{"index":"row 2","col 1":"c","col 2":"d"}}]\
  627. }}\
  628. '
  629. """
    if orient == "table" and dtype:
        raise ValueError("cannot pass both dtype and orient='table'")
    if orient == "table" and convert_axes:
        raise ValueError("cannot pass both convert_axes and orient='table'")

    check_dtype_backend(dtype_backend)

    if dtype is None and orient != "table":
        # error: Incompatible types in assignment (expression has type "bool", variable
        # has type "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float],
        # Type[int], Type[complex], Type[bool], Type[object], Dict[Hashable,
        # Union[ExtensionDtype, Union[str, dtype[Any]], Type[str], Type[float],
        # Type[int], Type[complex], Type[bool], Type[object]]], None]")
        dtype = True  # type: ignore[assignment]
    if convert_axes is None and orient != "table":
        convert_axes = True

    json_reader = JsonReader(
        path_or_buf,
        orient=orient,
        typ=typ,
        dtype=dtype,
        convert_axes=convert_axes,
        convert_dates=convert_dates,
        keep_default_dates=keep_default_dates,
        precise_float=precise_float,
        date_unit=date_unit,
        encoding=encoding,
        lines=lines,
        chunksize=chunksize,
        compression=compression,
        nrows=nrows,
        storage_options=storage_options,
        encoding_errors=encoding_errors,
        dtype_backend=dtype_backend,
        engine=engine,
    )

    if chunksize:
        return json_reader
    else:
        return json_reader.read()


class JsonReader(abc.Iterator, Generic[FrameSeriesStrT]):
    """
    JsonReader provides an interface for reading in a JSON file.

    If initialized with ``lines=True`` and ``chunksize``, can be iterated over
    ``chunksize`` lines at a time. Otherwise, calling ``read`` reads in the
    whole document.
    """

    def __init__(
        self,
        filepath_or_buffer,
        orient,
        typ: FrameSeriesStrT,
        dtype,
        convert_axes,
        convert_dates,
        keep_default_dates: bool,
        precise_float: bool,
        date_unit,
        encoding,
        lines: bool,
        chunksize: int | None,
        compression: CompressionOptions,
        nrows: int | None,
        storage_options: StorageOptions = None,
        encoding_errors: str | None = "strict",
        dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
        engine: JSONEngine = "ujson",
    ) -> None:
        self.orient = orient
        self.typ = typ
        self.dtype = dtype
        self.convert_axes = convert_axes
        self.convert_dates = convert_dates
        self.keep_default_dates = keep_default_dates
        self.precise_float = precise_float
        self.date_unit = date_unit
        self.encoding = encoding
        self.engine = engine
        self.compression = compression
        self.storage_options = storage_options
        self.lines = lines
        self.chunksize = chunksize
        self.nrows_seen = 0
        self.nrows = nrows
        self.encoding_errors = encoding_errors
        self.handles: IOHandles[str] | None = None
        self.dtype_backend = dtype_backend

        if self.engine not in {"pyarrow", "ujson"}:
            raise ValueError(
                f"The engine type {self.engine} is currently not supported."
            )
        if self.chunksize is not None:
            self.chunksize = validate_integer("chunksize", self.chunksize, 1)
            if not self.lines:
                raise ValueError("chunksize can only be passed if lines=True")
            if self.engine == "pyarrow":
                raise ValueError(
                    "currently pyarrow engine doesn't support chunksize parameter"
                )
        if self.nrows is not None:
            self.nrows = validate_integer("nrows", self.nrows, 0)
            if not self.lines:
                raise ValueError("nrows can only be passed if lines=True")

        if self.engine == "pyarrow":
            if not self.lines:
                raise ValueError(
                    "currently pyarrow engine only supports "
                    "the line-delimited JSON format"
                )
            self.data = filepath_or_buffer
        elif self.engine == "ujson":
            data = self._get_data_from_filepath(filepath_or_buffer)
            self.data = self._preprocess_data(data)

    def _preprocess_data(self, data):
        """
        At this point, the data either has a `read` attribute (e.g. a file
        object or a StringIO) or is a string that is a JSON document.

        If self.chunksize, we prepare the data for the `__next__` method.
        Otherwise, we read it into memory for the `read` method.
        """
        if hasattr(data, "read") and not (self.chunksize or self.nrows):
            with self:
                data = data.read()
        if not hasattr(data, "read") and (self.chunksize or self.nrows):
            data = StringIO(data)

        return data

    def _get_data_from_filepath(self, filepath_or_buffer):
        """
        The function read_json accepts three input types:
            1. filepath (string-like)
            2. file-like object (e.g. open file object, StringIO)
            3. JSON string

        This method turns (1) into (2) to simplify the rest of the processing.
        It returns input types (2) and (3) unchanged.

        It raises FileNotFoundError if the input is a string ending in
        one of .json, .json.gz, .json.bz2, etc. but no such file exists.
        """
        # if it is a string but the file does not exist, it might be a JSON string
        filepath_or_buffer = stringify_path(filepath_or_buffer)
        if (
            not isinstance(filepath_or_buffer, str)
            or is_url(filepath_or_buffer)
            or is_fsspec_url(filepath_or_buffer)
            or file_exists(filepath_or_buffer)
        ):
            self.handles = get_handle(
                filepath_or_buffer,
                "r",
                encoding=self.encoding,
                compression=self.compression,
                storage_options=self.storage_options,
                errors=self.encoding_errors,
            )
            filepath_or_buffer = self.handles.handle
        elif (
            isinstance(filepath_or_buffer, str)
            and filepath_or_buffer.lower().endswith(
                (".json",) + tuple(f".json{c}" for c in extension_to_compression)
            )
            and not file_exists(filepath_or_buffer)
        ):
            raise FileNotFoundError(f"File {filepath_or_buffer} does not exist")

        return filepath_or_buffer

    def _combine_lines(self, lines) -> str:
        """
        Combines a list of JSON objects into one JSON object.
        """
        return (
            f'[{",".join([line for line in (line.strip() for line in lines) if line])}]'
        )
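
    # For example (illustrative): ['{"a": 1}', '{"a": 2}', ''] becomes the
    # single document '[{"a": 1},{"a": 2}]'. Blank lines are dropped, so
    # trailing newlines in line-delimited input do not produce empty records.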

    @overload
    def read(self: JsonReader[Literal["frame"]]) -> DataFrame:
        ...

    @overload
    def read(self: JsonReader[Literal["series"]]) -> Series:
        ...

    @overload
    def read(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series:
        ...

    def read(self) -> DataFrame | Series:
        """
        Read the whole JSON input into a pandas object.
        """
        obj: DataFrame | Series
        with self:
            if self.engine == "pyarrow":
                pyarrow_json = import_optional_dependency("pyarrow.json")
                pa_table = pyarrow_json.read_json(self.data)

                mapping: type[ArrowDtype] | None | Callable
                if self.dtype_backend == "pyarrow":
                    mapping = ArrowDtype
                elif self.dtype_backend == "numpy_nullable":
                    from pandas.io._util import _arrow_dtype_mapping

                    mapping = _arrow_dtype_mapping().get
                else:
                    mapping = None

                return pa_table.to_pandas(types_mapper=mapping)
            elif self.engine == "ujson":
                if self.lines:
                    if self.chunksize:
                        obj = concat(self)
                    elif self.nrows:
                        lines = list(islice(self.data, self.nrows))
                        lines_json = self._combine_lines(lines)
                        obj = self._get_object_parser(lines_json)
                    else:
                        data = ensure_str(self.data)
                        data_lines = data.split("\n")
                        obj = self._get_object_parser(self._combine_lines(data_lines))
                else:
                    obj = self._get_object_parser(self.data)
                if self.dtype_backend is not lib.no_default:
                    return obj.convert_dtypes(
                        infer_objects=False, dtype_backend=self.dtype_backend
                    )
                else:
                    return obj

    def _get_object_parser(self, json) -> DataFrame | Series:
        """
        Parses a json document into a pandas object.
        """
        typ = self.typ
        dtype = self.dtype
        kwargs = {
            "orient": self.orient,
            "dtype": self.dtype,
            "convert_axes": self.convert_axes,
            "convert_dates": self.convert_dates,
            "keep_default_dates": self.keep_default_dates,
            "precise_float": self.precise_float,
            "date_unit": self.date_unit,
            "dtype_backend": self.dtype_backend,
        }
        obj = None
        if typ == "frame":
            obj = FrameParser(json, **kwargs).parse()

        if typ == "series" or obj is None:
            if not isinstance(dtype, bool):
                kwargs["dtype"] = dtype
            obj = SeriesParser(json, **kwargs).parse()

        return obj

    def close(self) -> None:
        """
        If we opened a stream earlier, in _get_data_from_filepath, we should
        close it.

        If an open stream or file was passed, we leave it open.
        """
        if self.handles is not None:
            self.handles.close()

    def __iter__(self: JsonReader[FrameSeriesStrT]) -> JsonReader[FrameSeriesStrT]:
        return self

    @overload
    def __next__(self: JsonReader[Literal["frame"]]) -> DataFrame:
        ...

    @overload
    def __next__(self: JsonReader[Literal["series"]]) -> Series:
        ...

    @overload
    def __next__(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series:
        ...

    def __next__(self) -> DataFrame | Series:
        if self.nrows and self.nrows_seen >= self.nrows:
            self.close()
            raise StopIteration

        lines = list(islice(self.data, self.chunksize))
        if not lines:
            self.close()
            raise StopIteration

        try:
            lines_json = self._combine_lines(lines)
            obj = self._get_object_parser(lines_json)

            # Make sure that the returned objects have the right index.
            obj.index = range(self.nrows_seen, self.nrows_seen + len(obj))
            self.nrows_seen += len(obj)
        except Exception as ex:
            self.close()
            raise ex

        if self.dtype_backend is not lib.no_default:
            return obj.convert_dtypes(
                infer_objects=False, dtype_backend=self.dtype_backend
            )
        else:
            return obj

    def __enter__(self) -> JsonReader[FrameSeriesStrT]:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        self.close()


class Parser:
    _split_keys: tuple[str, ...]
    _default_orient: str

    _STAMP_UNITS = ("s", "ms", "us", "ns")
    _MIN_STAMPS = {
        "s": 31536000,
        "ms": 31536000000,
        "us": 31536000000000,
        "ns": 31536000000000000,
    }
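    # The thresholds above are one year's worth of each unit
    # (365 * 24 * 3600 = 31536000 seconds, i.e. 1971-01-01 in epoch terms):
    # _try_convert_to_date treats smaller numbers as out of range, so that
    # ordinary small integer columns are not misread as epoch timestamps.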

    def __init__(
        self,
        json,
        orient,
        dtype: DtypeArg | None = None,
        convert_axes: bool = True,
        convert_dates: bool | list[str] = True,
        keep_default_dates: bool = False,
        precise_float: bool = False,
        date_unit=None,
        dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
    ) -> None:
        self.json = json

        if orient is None:
            orient = self._default_orient
        self.orient = orient

        self.dtype = dtype

        if date_unit is not None:
            date_unit = date_unit.lower()
            if date_unit not in self._STAMP_UNITS:
                raise ValueError(f"date_unit must be one of {self._STAMP_UNITS}")
            self.min_stamp = self._MIN_STAMPS[date_unit]
        else:
            self.min_stamp = self._MIN_STAMPS["s"]

        self.precise_float = precise_float
        self.convert_axes = convert_axes
        self.convert_dates = convert_dates
        self.date_unit = date_unit
        self.keep_default_dates = keep_default_dates
        self.obj: DataFrame | Series | None = None
        self.dtype_backend = dtype_backend

    def check_keys_split(self, decoded) -> None:
        """
        Checks that dict has only the appropriate keys for orient='split'.
        """
        bad_keys = set(decoded.keys()).difference(set(self._split_keys))
        if bad_keys:
            bad_keys_joined = ", ".join(bad_keys)
            raise ValueError(f"JSON data had unexpected key(s): {bad_keys_joined}")

    def parse(self):
        self._parse()

        if self.obj is None:
            return None
        if self.convert_axes:
            self._convert_axes()
        self._try_convert_types()
        return self.obj

    def _parse(self):
        raise AbstractMethodError(self)

    def _convert_axes(self) -> None:
        """
        Try to convert axes.
        """
        obj = self.obj
        assert obj is not None  # for mypy
        for axis_name in obj._AXIS_ORDERS:
            new_axis, result = self._try_convert_data(
                name=axis_name,
                data=obj._get_axis(axis_name),
                use_dtypes=False,
                convert_dates=True,
            )
            if result:
                setattr(self.obj, axis_name, new_axis)

    def _try_convert_types(self):
        raise AbstractMethodError(self)

    def _try_convert_data(
        self,
        name,
        data,
        use_dtypes: bool = True,
        convert_dates: bool | list[str] = True,
    ):
        """
        Try to parse an ndarray-like into a column by inferring dtype.
        """
        # don't try to coerce, unless a force conversion
        if use_dtypes:
            if not self.dtype:
                if all(notna(data)):
                    return data, False
                return data.fillna(np.nan), True

            elif self.dtype is True:
                pass
            else:
                # dtype to force
                dtype = (
                    self.dtype.get(name) if isinstance(self.dtype, dict) else self.dtype
                )
                if dtype is not None:
                    try:
                        return data.astype(dtype), True
                    except (TypeError, ValueError):
                        return data, False

        if convert_dates:
            new_data, result = self._try_convert_to_date(data)
            if result:
                return new_data, True

        if self.dtype_backend is not lib.no_default and not isinstance(data, ABCIndex):
            # Fall through for conversion later on
            return data, True
        elif data.dtype == "object":
            # try float
            try:
                data = data.astype("float64")
            except (TypeError, ValueError):
                pass

        if data.dtype.kind == "f":
            if data.dtype != "float64":
                # coerce floats to 64
                try:
                    data = data.astype("float64")
                except (TypeError, ValueError):
                    pass

        # don't coerce 0-len data
        if len(data) and data.dtype in ("float", "object"):
            # coerce ints if we can
            try:
                new_data = data.astype("int64")
                if (new_data == data).all():
                    data = new_data
            except (TypeError, ValueError, OverflowError):
                pass

        # coerce ints to 64
        if data.dtype == "int":
            try:
                data = data.astype("int64")
            except (TypeError, ValueError):
                pass

        # if we have an index, we want to preserve dtypes
        if name == "index" and len(data):
            if self.orient == "split":
                return data, False

        return data, True

    def _try_convert_to_date(self, data):
        """
        Try to parse an ndarray-like into a date column.

        Try to coerce object in epoch/iso formats and integer/float in epoch
        formats. Return a boolean indicating whether parsing was successful.
        """
        # no conversion on empty
        if not len(data):
            return data, False

        new_data = data
        if new_data.dtype == "object":
            try:
                new_data = data.astype("int64")
            except OverflowError:
                return data, False
            except (TypeError, ValueError):
                pass

        # ignore numbers that are out of range
        if issubclass(new_data.dtype.type, np.number):
            in_range = (
                isna(new_data._values)
                | (new_data > self.min_stamp)
                | (new_data._values == iNaT)
            )
            if not in_range.all():
                return data, False

        date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
        for date_unit in date_units:
            try:
                new_data = to_datetime(new_data, errors="raise", unit=date_unit)
            except (ValueError, OverflowError, TypeError):
                continue
            return new_data, True
        return data, False
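
    # Sketch of the conversion above (illustrative values): an object column
    # like ["1577836800", "1577923200"] is first cast to int64, range-checked
    # against min_stamp, then tried unit by unit until
    # to_datetime(..., unit="s") succeeds, yielding 2020-01-01 and 2020-01-02.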

    def _try_convert_dates(self):
        raise AbstractMethodError(self)


class SeriesParser(Parser):
    _default_orient = "index"
    _split_keys = ("name", "index", "data")

    def _parse(self) -> None:
        data = loads(self.json, precise_float=self.precise_float)

        if self.orient == "split":
            decoded = {str(k): v for k, v in data.items()}
            self.check_keys_split(decoded)
            self.obj = Series(**decoded)
        else:
            self.obj = Series(data)

    def _try_convert_types(self) -> None:
        if self.obj is None:
            return
        obj, result = self._try_convert_data(
            "data", self.obj, convert_dates=self.convert_dates
        )
        if result:
            self.obj = obj


class FrameParser(Parser):
    _default_orient = "columns"
    _split_keys = ("columns", "index", "data")

    def _parse(self) -> None:
        json = self.json
        orient = self.orient

        if orient == "columns":
            self.obj = DataFrame(
                loads(json, precise_float=self.precise_float), dtype=None
            )
        elif orient == "split":
            decoded = {
                str(k): v
                for k, v in loads(json, precise_float=self.precise_float).items()
            }
            self.check_keys_split(decoded)
            orig_names = [
                (tuple(col) if isinstance(col, list) else col)
                for col in decoded["columns"]
            ]
            decoded["columns"] = dedup_names(
                orig_names,
                is_potential_multi_index(orig_names, None),
            )
            self.obj = DataFrame(dtype=None, **decoded)
        elif orient == "index":
            self.obj = DataFrame.from_dict(
                loads(json, precise_float=self.precise_float),
                dtype=None,
                orient="index",
            )
        elif orient == "table":
            self.obj = parse_table_schema(json, precise_float=self.precise_float)
        else:
            self.obj = DataFrame(
                loads(json, precise_float=self.precise_float), dtype=None
            )

    def _process_converter(self, f, filt=None) -> None:
        """
        Take a conversion function and possibly recreate the frame.
        """
        if filt is None:
            filt = lambda col, c: True

        obj = self.obj
        assert obj is not None  # for mypy

        needs_new_obj = False
        new_obj = {}
        for i, (col, c) in enumerate(obj.items()):
            if filt(col, c):
                new_data, result = f(col, c)
                if result:
                    c = new_data
                    needs_new_obj = True
            new_obj[i] = c

        if needs_new_obj:
            # possibly handle dup columns
            new_frame = DataFrame(new_obj, index=obj.index)
            new_frame.columns = obj.columns
            self.obj = new_frame

    def _try_convert_types(self) -> None:
        if self.obj is None:
            return
        if self.convert_dates:
            self._try_convert_dates()

        self._process_converter(
            lambda col, c: self._try_convert_data(col, c, convert_dates=False)
        )

    def _try_convert_dates(self) -> None:
        if self.obj is None:
            return

        # our columns to parse
        convert_dates_list_bool = self.convert_dates
        if isinstance(convert_dates_list_bool, bool):
            convert_dates_list_bool = []
        convert_dates = set(convert_dates_list_bool)

        def is_ok(col) -> bool:
            """
            Return True if this column should be tried for a date parse.
            """
            if not isinstance(col, str):
                return False

            col_lower = col.lower()
            if (
                col_lower.endswith("_at")
                or col_lower.endswith("_time")
                or col_lower == "modified"
                or col_lower == "date"
                or col_lower == "datetime"
                or col_lower.startswith("timestamp")
            ):
                return True
            return False

        self._process_converter(
            lambda col, c: self._try_convert_to_date(c),
            lambda col, c: (
                (self.keep_default_dates and is_ok(col)) or col in convert_dates
            ),
        )