# datetimelike.py
  1. """
  2. Base and utility classes for tseries type pandas objects.
  3. """
  4. from __future__ import annotations
  5. from abc import (
  6. ABC,
  7. abstractmethod,
  8. )
  9. from datetime import datetime
  10. from typing import (
  11. TYPE_CHECKING,
  12. Any,
  13. Callable,
  14. Sequence,
  15. TypeVar,
  16. cast,
  17. final,
  18. )
  19. import numpy as np
  20. from pandas._libs import (
  21. NaT,
  22. Timedelta,
  23. lib,
  24. )
  25. from pandas._libs.tslibs import (
  26. BaseOffset,
  27. Resolution,
  28. Tick,
  29. parsing,
  30. to_offset,
  31. )
  32. from pandas._typing import (
  33. Axis,
  34. npt,
  35. )
  36. from pandas.compat.numpy import function as nv
  37. from pandas.errors import NullFrequencyError
  38. from pandas.util._decorators import (
  39. Appender,
  40. cache_readonly,
  41. doc,
  42. )
  43. from pandas.core.dtypes.common import (
  44. is_categorical_dtype,
  45. is_dtype_equal,
  46. is_integer,
  47. is_list_like,
  48. )
  49. from pandas.core.dtypes.concat import concat_compat
  50. from pandas.core.arrays import (
  51. DatetimeArray,
  52. ExtensionArray,
  53. PeriodArray,
  54. TimedeltaArray,
  55. )
  56. from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
  57. import pandas.core.common as com
  58. import pandas.core.indexes.base as ibase
  59. from pandas.core.indexes.base import (
  60. Index,
  61. _index_shared_docs,
  62. )
  63. from pandas.core.indexes.extension import NDArrayBackedExtensionIndex
  64. from pandas.core.indexes.range import RangeIndex
  65. from pandas.core.tools.timedeltas import to_timedelta
  66. if TYPE_CHECKING:
  67. from pandas import CategoricalIndex
  68. _index_doc_kwargs = dict(ibase._index_doc_kwargs)
  69. _T = TypeVar("_T", bound="DatetimeIndexOpsMixin")
  70. _TDT = TypeVar("_TDT", bound="DatetimeTimedeltaMixin")
  71. class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex, ABC):
  72. """
  73. Common ops mixin to support a unified interface datetimelike Index.
  74. """
  75. _can_hold_strings = False
  76. _data: DatetimeArray | TimedeltaArray | PeriodArray
  77. @doc(DatetimeLikeArrayMixin.mean)
  78. def mean(self, *, skipna: bool = True, axis: int | None = 0):
  79. return self._data.mean(skipna=skipna, axis=axis)
  80. @property
  81. def freq(self) -> BaseOffset | None:
  82. return self._data.freq
  83. @freq.setter
  84. def freq(self, value) -> None:
  85. # error: Property "freq" defined in "PeriodArray" is read-only [misc]
  86. self._data.freq = value # type: ignore[misc]
  87. @property
  88. def asi8(self) -> npt.NDArray[np.int64]:
  89. return self._data.asi8
  90. @property
  91. @doc(DatetimeLikeArrayMixin.freqstr)
  92. def freqstr(self) -> str | None:
  93. return self._data.freqstr
  94. @cache_readonly
  95. @abstractmethod
  96. def _resolution_obj(self) -> Resolution:
  97. ...
  98. @cache_readonly
  99. @doc(DatetimeLikeArrayMixin.resolution)
  100. def resolution(self) -> str:
  101. return self._data.resolution
  102. # ------------------------------------------------------------------------
  103. @cache_readonly
  104. def hasnans(self) -> bool:
  105. return self._data._hasna
  106. def equals(self, other: Any) -> bool:
  107. """
  108. Determines if two Index objects contain the same elements.
  109. """
  110. if self.is_(other):
  111. return True
  112. if not isinstance(other, Index):
  113. return False
  114. elif other.dtype.kind in ["f", "i", "u", "c"]:
  115. return False
  116. elif not isinstance(other, type(self)):
  117. should_try = False
  118. inferable = self._data._infer_matches
  119. if other.dtype == object:
  120. should_try = other.inferred_type in inferable
  121. elif is_categorical_dtype(other.dtype):
  122. other = cast("CategoricalIndex", other)
  123. should_try = other.categories.inferred_type in inferable
  124. if should_try:
  125. try:
  126. other = type(self)(other)
  127. except (ValueError, TypeError, OverflowError):
  128. # e.g.
  129. # ValueError -> cannot parse str entry, or OutOfBoundsDatetime
  130. # TypeError -> trying to convert IntervalIndex to DatetimeIndex
  131. # OverflowError -> Index([very_large_timedeltas])
  132. return False
  133. if not is_dtype_equal(self.dtype, other.dtype):
  134. # have different timezone
  135. return False
  136. return np.array_equal(self.asi8, other.asi8)
  137. @Appender(Index.__contains__.__doc__)
  138. def __contains__(self, key: Any) -> bool:
  139. hash(key)
  140. try:
  141. self.get_loc(key)
  142. except (KeyError, TypeError, ValueError):
  143. return False
  144. return True
  145. def _convert_tolerance(self, tolerance, target):
  146. tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
  147. return super()._convert_tolerance(tolerance, target)
  148. # --------------------------------------------------------------------
  149. # Rendering Methods
  150. def format(
  151. self,
  152. name: bool = False,
  153. formatter: Callable | None = None,
  154. na_rep: str = "NaT",
  155. date_format: str | None = None,
  156. ) -> list[str]:
  157. """
  158. Render a string representation of the Index.
  159. """
  160. header = []
  161. if name:
  162. header.append(
  163. ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
  164. if self.name is not None
  165. else ""
  166. )
  167. if formatter is not None:
  168. return header + list(self.map(formatter))
  169. return self._format_with_header(header, na_rep=na_rep, date_format=date_format)
  170. def _format_with_header(
  171. self, header: list[str], na_rep: str = "NaT", date_format: str | None = None
  172. ) -> list[str]:
  173. # matches base class except for whitespace padding and date_format
  174. return header + list(
  175. self._format_native_types(na_rep=na_rep, date_format=date_format)
  176. )
  177. @property
  178. def _formatter_func(self):
  179. return self._data._formatter()
  180. def _format_attrs(self):
  181. """
  182. Return a list of tuples of the (attr,formatted_value).
  183. """
  184. attrs = super()._format_attrs()
  185. for attrib in self._attributes:
  186. # iterating over _attributes prevents us from doing this for PeriodIndex
  187. if attrib == "freq":
  188. freq = self.freqstr
  189. if freq is not None:
  190. freq = repr(freq) # e.g. D -> 'D'
  191. attrs.append(("freq", freq))
  192. return attrs
  193. @Appender(Index._summary.__doc__)
  194. def _summary(self, name=None) -> str:
  195. result = super()._summary(name=name)
  196. if self.freq:
  197. result += f"\nFreq: {self.freqstr}"
  198. return result
  199. # --------------------------------------------------------------------
  200. # Indexing Methods
  201. @final
  202. def _can_partial_date_slice(self, reso: Resolution) -> bool:
  203. # e.g. test_getitem_setitem_periodindex
  204. # History of conversation GH#3452, GH#3931, GH#2369, GH#14826
  205. return reso > self._resolution_obj
  206. # NB: for DTI/PI, not TDI
  207. def _parsed_string_to_bounds(self, reso: Resolution, parsed):
  208. raise NotImplementedError
  209. def _parse_with_reso(self, label: str):
  210. # overridden by TimedeltaIndex
  211. try:
  212. if self.freq is None or hasattr(self.freq, "rule_code"):
  213. freq = self.freq
  214. except NotImplementedError:
  215. freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
  216. freqstr: str | None
  217. if freq is not None and not isinstance(freq, str):
  218. freqstr = freq.rule_code
  219. else:
  220. freqstr = freq
  221. if isinstance(label, np.str_):
  222. # GH#45580
  223. label = str(label)
  224. parsed, reso_str = parsing.parse_datetime_string_with_reso(label, freqstr)
  225. reso = Resolution.from_attrname(reso_str)
  226. return parsed, reso
  227. def _get_string_slice(self, key: str):
  228. # overridden by TimedeltaIndex
  229. parsed, reso = self._parse_with_reso(key)
  230. try:
  231. return self._partial_date_slice(reso, parsed)
  232. except KeyError as err:
  233. raise KeyError(key) from err
  234. @final
  235. def _partial_date_slice(
  236. self,
  237. reso: Resolution,
  238. parsed: datetime,
  239. ):
  240. """
  241. Parameters
  242. ----------
  243. reso : Resolution
  244. parsed : datetime
  245. Returns
  246. -------
  247. slice or ndarray[intp]
  248. """
  249. if not self._can_partial_date_slice(reso):
  250. raise ValueError
  251. t1, t2 = self._parsed_string_to_bounds(reso, parsed)
  252. vals = self._data._ndarray
  253. unbox = self._data._unbox
  254. if self.is_monotonic_increasing:
  255. if len(self) and (
  256. (t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])
  257. ):
  258. # we are out of range
  259. raise KeyError
  260. # TODO: does this depend on being monotonic _increasing_?
  261. # a monotonic (sorted) series can be sliced
  262. left = vals.searchsorted(unbox(t1), side="left")
  263. right = vals.searchsorted(unbox(t2), side="right")
  264. return slice(left, right)
  265. else:
  266. lhs_mask = vals >= unbox(t1)
  267. rhs_mask = vals <= unbox(t2)
  268. # try to find the dates
  269. return (lhs_mask & rhs_mask).nonzero()[0]
  270. def _maybe_cast_slice_bound(self, label, side: str):
  271. """
  272. If label is a string, cast it to scalar type according to resolution.
  273. Parameters
  274. ----------
  275. label : object
  276. side : {'left', 'right'}
  277. Returns
  278. -------
  279. label : object
  280. Notes
  281. -----
  282. Value of `side` parameter should be validated in caller.
  283. """
  284. if isinstance(label, str):
  285. try:
  286. parsed, reso = self._parse_with_reso(label)
  287. except ValueError as err:
  288. # DTI -> parsing.DateParseError
  289. # TDI -> 'unit abbreviation w/o a number'
  290. # PI -> string cannot be parsed as datetime-like
  291. self._raise_invalid_indexer("slice", label, err)
  292. lower, upper = self._parsed_string_to_bounds(reso, parsed)
  293. return lower if side == "left" else upper
  294. elif not isinstance(label, self._data._recognized_scalars):
  295. self._raise_invalid_indexer("slice", label)
  296. return label
  297. # --------------------------------------------------------------------
  298. # Arithmetic Methods
  299. def shift(self: _T, periods: int = 1, freq=None) -> _T:
  300. """
  301. Shift index by desired number of time frequency increments.
  302. This method is for shifting the values of datetime-like indexes
  303. by a specified time increment a given number of times.
  304. Parameters
  305. ----------
  306. periods : int, default 1
  307. Number of periods (or increments) to shift by,
  308. can be positive or negative.
  309. freq : pandas.DateOffset, pandas.Timedelta or string, optional
  310. Frequency increment to shift by.
  311. If None, the index is shifted by its own `freq` attribute.
  312. Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
  313. Returns
  314. -------
  315. pandas.DatetimeIndex
  316. Shifted index.
  317. See Also
  318. --------
  319. Index.shift : Shift values of Index.
  320. PeriodIndex.shift : Shift values of PeriodIndex.
  321. """
  322. raise NotImplementedError
  323. # --------------------------------------------------------------------
  324. @doc(Index._maybe_cast_listlike_indexer)
  325. def _maybe_cast_listlike_indexer(self, keyarr):
  326. try:
  327. res = self._data._validate_listlike(keyarr, allow_object=True)
  328. except (ValueError, TypeError):
  329. if not isinstance(keyarr, ExtensionArray):
  330. # e.g. we don't want to cast DTA to ndarray[object]
  331. res = com.asarray_tuplesafe(keyarr)
  332. # TODO: com.asarray_tuplesafe shouldn't cast e.g. DatetimeArray
  333. else:
  334. res = keyarr
  335. return Index(res, dtype=res.dtype)
  336. class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, ABC):
  337. """
  338. Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,
  339. but not PeriodIndex
  340. """
  341. _data: DatetimeArray | TimedeltaArray
  342. _comparables = ["name", "freq"]
  343. _attributes = ["name", "freq"]
  344. # Compat for frequency inference, see GH#23789
  345. _is_monotonic_increasing = Index.is_monotonic_increasing
  346. _is_monotonic_decreasing = Index.is_monotonic_decreasing
  347. _is_unique = Index.is_unique
  348. _join_precedence = 10
  349. @property
  350. def unit(self) -> str:
  351. return self._data.unit
  352. def as_unit(self: _TDT, unit: str) -> _TDT:
  353. """
  354. Convert to a dtype with the given unit resolution.
  355. Parameters
  356. ----------
  357. unit : {'s', 'ms', 'us', 'ns'}
  358. Returns
  359. -------
  360. same type as self
  361. """
  362. arr = self._data.as_unit(unit)
  363. return type(self)._simple_new(arr, name=self.name)
  364. def _with_freq(self, freq):
  365. arr = self._data._with_freq(freq)
  366. return type(self)._simple_new(arr, name=self._name)
  367. @property
  368. def values(self) -> np.ndarray:
  369. # NB: For Datetime64TZ this is lossy
  370. return self._data._ndarray
  371. @doc(DatetimeIndexOpsMixin.shift)
  372. def shift(self: _TDT, periods: int = 1, freq=None) -> _TDT:
  373. if freq is not None and freq != self.freq:
  374. if isinstance(freq, str):
  375. freq = to_offset(freq)
  376. offset = periods * freq
  377. return self + offset
  378. if periods == 0 or len(self) == 0:
  379. # GH#14811 empty case
  380. return self.copy()
  381. if self.freq is None:
  382. raise NullFrequencyError("Cannot shift with no freq")
  383. start = self[0] + periods * self.freq
  384. end = self[-1] + periods * self.freq
  385. # Note: in the DatetimeTZ case, _generate_range will infer the
  386. # appropriate timezone from `start` and `end`, so tz does not need
  387. # to be passed explicitly.
  388. result = self._data._generate_range(
  389. start=start, end=end, periods=None, freq=self.freq
  390. )
  391. return type(self)._simple_new(result, name=self.name)
  392. @cache_readonly
  393. @doc(DatetimeLikeArrayMixin.inferred_freq)
  394. def inferred_freq(self) -> str | None:
  395. return self._data.inferred_freq
  396. # --------------------------------------------------------------------
  397. # Set Operation Methods
  398. @cache_readonly
  399. def _as_range_index(self) -> RangeIndex:
  400. # Convert our i8 representations to RangeIndex
  401. # Caller is responsible for checking isinstance(self.freq, Tick)
  402. freq = cast(Tick, self.freq)
  403. tick = freq.delta._value
  404. rng = range(self[0]._value, self[-1]._value + tick, tick)
  405. return RangeIndex(rng)
  406. def _can_range_setop(self, other):
  407. return isinstance(self.freq, Tick) and isinstance(other.freq, Tick)
  408. def _wrap_range_setop(self, other, res_i8):
  409. new_freq = None
  410. if not len(res_i8):
  411. # RangeIndex defaults to step=1, which we don't want.
  412. new_freq = self.freq
  413. elif isinstance(res_i8, RangeIndex):
  414. new_freq = to_offset(Timedelta(res_i8.step))
  415. # TODO(GH#41493): we cannot just do
  416. # type(self._data)(res_i8.values, dtype=self.dtype, freq=new_freq)
  417. # because test_setops_preserve_freq fails with _validate_frequency raising.
  418. # This raising is incorrect, as 'on_freq' is incorrect. This will
  419. # be fixed by GH#41493
  420. res_values = res_i8.values.view(self._data._ndarray.dtype)
  421. result = type(self._data)._simple_new(
  422. res_values, dtype=self.dtype, freq=new_freq
  423. )
  424. return self._wrap_setop_result(other, result)
  425. def _range_intersect(self, other, sort):
  426. # Dispatch to RangeIndex intersection logic.
  427. left = self._as_range_index
  428. right = other._as_range_index
  429. res_i8 = left.intersection(right, sort=sort)
  430. return self._wrap_range_setop(other, res_i8)
  431. def _range_union(self, other, sort):
  432. # Dispatch to RangeIndex union logic.
  433. left = self._as_range_index
  434. right = other._as_range_index
  435. res_i8 = left.union(right, sort=sort)
  436. return self._wrap_range_setop(other, res_i8)
  437. def _intersection(self, other: Index, sort: bool = False) -> Index:
  438. """
  439. intersection specialized to the case with matching dtypes and both non-empty.
  440. """
  441. other = cast("DatetimeTimedeltaMixin", other)
  442. if self._can_range_setop(other):
  443. return self._range_intersect(other, sort=sort)
  444. if not self._can_fast_intersect(other):
  445. result = Index._intersection(self, other, sort=sort)
  446. # We need to invalidate the freq because Index._intersection
  447. # uses _shallow_copy on a view of self._data, which will preserve
  448. # self.freq if we're not careful.
  449. # At this point we should have result.dtype == self.dtype
  450. # and type(result) is type(self._data)
  451. result = self._wrap_setop_result(other, result)
  452. return result._with_freq(None)._with_freq("infer")
  453. else:
  454. return self._fast_intersect(other, sort)
  455. def _fast_intersect(self, other, sort):
  456. # to make our life easier, "sort" the two ranges
  457. if self[0] <= other[0]:
  458. left, right = self, other
  459. else:
  460. left, right = other, self
  461. # after sorting, the intersection always starts with the right index
  462. # and ends with the index of which the last elements is smallest
  463. end = min(left[-1], right[-1])
  464. start = right[0]
  465. if end < start:
  466. result = self[:0]
  467. else:
  468. lslice = slice(*left.slice_locs(start, end))
  469. result = left._values[lslice]
  470. return result
  471. def _can_fast_intersect(self: _T, other: _T) -> bool:
  472. # Note: we only get here with len(self) > 0 and len(other) > 0
  473. if self.freq is None:
  474. return False
  475. elif other.freq != self.freq:
  476. return False
  477. elif not self.is_monotonic_increasing:
  478. # Because freq is not None, we must then be monotonic decreasing
  479. return False
  480. # this along with matching freqs ensure that we "line up",
  481. # so intersection will preserve freq
  482. # Note we are assuming away Ticks, as those go through _range_intersect
  483. # GH#42104
  484. return self.freq.n == 1
  485. def _can_fast_union(self: _T, other: _T) -> bool:
  486. # Assumes that type(self) == type(other), as per the annotation
  487. # The ability to fast_union also implies that `freq` should be
  488. # retained on union.
  489. freq = self.freq
  490. if freq is None or freq != other.freq:
  491. return False
  492. if not self.is_monotonic_increasing:
  493. # Because freq is not None, we must then be monotonic decreasing
  494. # TODO: do union on the reversed indexes?
  495. return False
  496. if len(self) == 0 or len(other) == 0:
  497. # only reached via union_many
  498. return True
  499. # to make our life easier, "sort" the two ranges
  500. if self[0] <= other[0]:
  501. left, right = self, other
  502. else:
  503. left, right = other, self
  504. right_start = right[0]
  505. left_end = left[-1]
  506. # Only need to "adjoin", not overlap
  507. return (right_start == left_end + freq) or right_start in left
  508. def _fast_union(self: _TDT, other: _TDT, sort=None) -> _TDT:
  509. # Caller is responsible for ensuring self and other are non-empty
  510. # to make our life easier, "sort" the two ranges
  511. if self[0] <= other[0]:
  512. left, right = self, other
  513. elif sort is False:
  514. # TDIs are not in the "correct" order and we don't want
  515. # to sort but want to remove overlaps
  516. left, right = self, other
  517. left_start = left[0]
  518. loc = right.searchsorted(left_start, side="left")
  519. right_chunk = right._values[:loc]
  520. dates = concat_compat((left._values, right_chunk))
  521. result = type(self)._simple_new(dates, name=self.name)
  522. return result
  523. else:
  524. left, right = other, self
  525. left_end = left[-1]
  526. right_end = right[-1]
  527. # concatenate
  528. if left_end < right_end:
  529. loc = right.searchsorted(left_end, side="right")
  530. right_chunk = right._values[loc:]
  531. dates = concat_compat([left._values, right_chunk])
  532. # The can_fast_union check ensures that the result.freq
  533. # should match self.freq
  534. dates = type(self._data)(dates, freq=self.freq)
  535. result = type(self)._simple_new(dates)
  536. return result
  537. else:
  538. return left
  539. def _union(self, other, sort):
  540. # We are called by `union`, which is responsible for this validation
  541. assert isinstance(other, type(self))
  542. assert self.dtype == other.dtype
  543. if self._can_range_setop(other):
  544. return self._range_union(other, sort=sort)
  545. if self._can_fast_union(other):
  546. result = self._fast_union(other, sort=sort)
  547. # in the case with sort=None, the _can_fast_union check ensures
  548. # that result.freq == self.freq
  549. return result
  550. else:
  551. return super()._union(other, sort)._with_freq("infer")
  552. # --------------------------------------------------------------------
  553. # Join Methods
  554. def _get_join_freq(self, other):
  555. """
  556. Get the freq to attach to the result of a join operation.
  557. """
  558. freq = None
  559. if self._can_fast_union(other):
  560. freq = self.freq
  561. return freq
  562. def _wrap_joined_index(
  563. self, joined, other, lidx: npt.NDArray[np.intp], ridx: npt.NDArray[np.intp]
  564. ):
  565. assert other.dtype == self.dtype, (other.dtype, self.dtype)
  566. result = super()._wrap_joined_index(joined, other, lidx, ridx)
  567. result._data._freq = self._get_join_freq(other)
  568. return result
  569. def _get_engine_target(self) -> np.ndarray:
  570. # engine methods and libjoin methods need dt64/td64 values cast to i8
  571. return self._data._ndarray.view("i8")
  572. def _from_join_target(self, result: np.ndarray):
  573. # view e.g. i8 back to M8[ns]
  574. result = result.view(self._data._ndarray.dtype)
  575. return self._data._from_backing_data(result)
  576. # --------------------------------------------------------------------
  577. # List-like Methods
  578. def _get_delete_freq(self, loc: int | slice | Sequence[int]):
  579. """
  580. Find the `freq` for self.delete(loc).
  581. """
  582. freq = None
  583. if self.freq is not None:
  584. if is_integer(loc):
  585. if loc in (0, -len(self), -1, len(self) - 1):
  586. freq = self.freq
  587. else:
  588. if is_list_like(loc):
  589. # error: Incompatible types in assignment (expression has
  590. # type "Union[slice, ndarray]", variable has type
  591. # "Union[int, slice, Sequence[int]]")
  592. loc = lib.maybe_indices_to_slice( # type: ignore[assignment]
  593. np.asarray(loc, dtype=np.intp), len(self)
  594. )
  595. if isinstance(loc, slice) and loc.step in (1, None):
  596. if loc.start in (0, None) or loc.stop in (len(self), None):
  597. freq = self.freq
  598. return freq
  599. def _get_insert_freq(self, loc: int, item):
  600. """
  601. Find the `freq` for self.insert(loc, item).
  602. """
  603. value = self._data._validate_scalar(item)
  604. item = self._data._box_func(value)
  605. freq = None
  606. if self.freq is not None:
  607. # freq can be preserved on edge cases
  608. if self.size:
  609. if item is NaT:
  610. pass
  611. elif loc in (0, -len(self)) and item + self.freq == self[0]:
  612. freq = self.freq
  613. elif (loc == len(self)) and item - self.freq == self[-1]:
  614. freq = self.freq
  615. else:
  616. # Adding a single item to an empty index may preserve freq
  617. if isinstance(self.freq, Tick):
  618. # all TimedeltaIndex cases go through here; is_on_offset
  619. # would raise TypeError
  620. freq = self.freq
  621. elif self.freq.is_on_offset(item):
  622. freq = self.freq
  623. return freq
  624. @doc(NDArrayBackedExtensionIndex.delete)
  625. def delete(self, loc) -> DatetimeTimedeltaMixin:
  626. result = super().delete(loc)
  627. result._data._freq = self._get_delete_freq(loc)
  628. return result
  629. @doc(NDArrayBackedExtensionIndex.insert)
  630. def insert(self, loc: int, item):
  631. result = super().insert(loc, item)
  632. if isinstance(result, type(self)):
  633. # i.e. parent class method did not cast
  634. result._data._freq = self._get_insert_freq(loc, item)
  635. return result
  636. # --------------------------------------------------------------------
  637. # NDArray-Like Methods
  638. @Appender(_index_shared_docs["take"] % _index_doc_kwargs)
  639. def take(
  640. self,
  641. indices,
  642. axis: Axis = 0,
  643. allow_fill: bool = True,
  644. fill_value=None,
  645. **kwargs,
  646. ):
  647. nv.validate_take((), kwargs)
  648. indices = np.asarray(indices, dtype=np.intp)
  649. result = NDArrayBackedExtensionIndex.take(
  650. self, indices, axis, allow_fill, fill_value, **kwargs
  651. )
  652. maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
  653. if isinstance(maybe_slice, slice):
  654. freq = self._data._get_getitem_freq(maybe_slice)
  655. result._data._freq = freq
  656. return result