import bz2
import datetime as dt
from datetime import datetime
import gzip
import io
import os
import struct
import tarfile
import warnings
import zipfile

import numpy as np
import pytest

from pandas.core.dtypes.common import is_categorical_dtype

import pandas as pd
import pandas._testing as tm
from pandas.core.frame import (
    DataFrame,
    Series,
)
from pandas.tests.io.test_compression import _compression_to_extension

from pandas.io.parsers import read_csv
from pandas.io.stata import (
    CategoricalConversionWarning,
    InvalidColumnName,
    PossiblePrecisionLoss,
    StataMissingValue,
    StataReader,
    StataWriter,
    StataWriterUTF8,
    ValueLabelTypeMismatch,
    read_stata,
)


@pytest.fixture
def mixed_frame():
    return DataFrame(
        {
            "a": [1, 2, 3, 4],
            "b": [1.0, 3.0, 27.0, 81.0],
            "c": ["Atlanta", "Birmingham", "Cincinnati", "Detroit"],
        }
    )


@pytest.fixture
def parsed_114(datapath):
    dta14_114 = datapath("io", "data", "stata", "stata5_114.dta")
    parsed_114 = read_stata(dta14_114, convert_dates=True)
    parsed_114.index.name = "index"
    return parsed_114


class TestStata:
    def read_dta(self, file):
        # Legacy default reader configuration
        return read_stata(file, convert_dates=True)

    def read_csv(self, file):
        return read_csv(file, parse_dates=True)

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_read_empty_dta(self, version):
        empty_ds = DataFrame(columns=["unit"])
        # GH 7369, make sure can read a 0-obs dta file
        with tm.ensure_clean() as path:
            empty_ds.to_stata(path, write_index=False, version=version)
            empty_ds2 = read_stata(path)
            tm.assert_frame_equal(empty_ds, empty_ds2)

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_read_index_col_none(self, version):
        df = DataFrame({"a": range(5), "b": ["b1", "b2", "b3", "b4", "b5"]})
        # GH 7369, make sure can read a 0-obs dta file
        with tm.ensure_clean() as path:
            df.to_stata(path, write_index=False, version=version)
            read_df = read_stata(path)

        assert isinstance(read_df.index, pd.RangeIndex)
        expected = df.copy()
        expected["a"] = expected["a"].astype(np.int32)
        tm.assert_frame_equal(read_df, expected, check_index_type=True)

    @pytest.mark.parametrize("file", ["stata1_114", "stata1_117"])
    def test_read_dta1(self, file, datapath):
        file = datapath("io", "data", "stata", f"{file}.dta")
        parsed = self.read_dta(file)

        # Pandas uses np.nan as missing value.
        # Thus, all columns will be of type float, regardless of their name.
        expected = DataFrame(
            [(np.nan, np.nan, np.nan, np.nan, np.nan)],
            columns=["float_miss", "double_miss", "byte_miss", "int_miss", "long_miss"],
        )

        # this is an oddity as really the nan should be float64, but
        # the casting doesn't fail so need to match stata here
        expected["float_miss"] = expected["float_miss"].astype(np.float32)

        tm.assert_frame_equal(parsed, expected)

    def test_read_dta2(self, datapath):
        expected = DataFrame.from_records(
            [
                (
                    datetime(2006, 11, 19, 23, 13, 20),
                    1479596223000,
                    datetime(2010, 1, 20),
                    datetime(2010, 1, 8),
                    datetime(2010, 1, 1),
                    datetime(1974, 7, 1),
                    datetime(2010, 1, 1),
                    datetime(2010, 1, 1),
                ),
                (
                    datetime(1959, 12, 31, 20, 3, 20),
                    -1479590,
                    datetime(1953, 10, 2),
                    datetime(1948, 6, 10),
                    datetime(1955, 1, 1),
                    datetime(1955, 7, 1),
                    datetime(1955, 1, 1),
                    datetime(2, 1, 1),
                ),
                (pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT),
            ],
            columns=[
                "datetime_c",
                "datetime_big_c",
                "date",
                "weekly_date",
                "monthly_date",
                "quarterly_date",
                "half_yearly_date",
                "yearly_date",
            ],
        )
        expected["yearly_date"] = expected["yearly_date"].astype("O")

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            parsed_114 = self.read_dta(
                datapath("io", "data", "stata", "stata2_114.dta")
            )
            parsed_115 = self.read_dta(
                datapath("io", "data", "stata", "stata2_115.dta")
            )
            parsed_117 = self.read_dta(
                datapath("io", "data", "stata", "stata2_117.dta")
            )
            # 113 is buggy due to limits of date format support in Stata
            # parsed_113 = self.read_dta(
            #     datapath("io", "data", "stata", "stata2_113.dta")
            # )

        # Remove resource warnings
        w = [x for x in w if x.category is UserWarning]

        # should get warning for each call to read_dta
        assert len(w) == 3

        # buggy test because of the NaT comparison on certain platforms
        # Format 113 test fails since it does not support tc and tC formats
        # tm.assert_frame_equal(parsed_113, expected)
        tm.assert_frame_equal(parsed_114, expected, check_datetimelike_compat=True)
        tm.assert_frame_equal(parsed_115, expected, check_datetimelike_compat=True)
        tm.assert_frame_equal(parsed_117, expected, check_datetimelike_compat=True)

    @pytest.mark.parametrize(
        "file", ["stata3_113", "stata3_114", "stata3_115", "stata3_117"]
    )
    def test_read_dta3(self, file, datapath):
        file = datapath("io", "data", "stata", f"{file}.dta")
        parsed = self.read_dta(file)

        # match stata here
        expected = self.read_csv(datapath("io", "data", "stata", "stata3.csv"))
        expected = expected.astype(np.float32)
        expected["year"] = expected["year"].astype(np.int16)
        expected["quarter"] = expected["quarter"].astype(np.int8)

        tm.assert_frame_equal(parsed, expected)

    @pytest.mark.parametrize(
        "file", ["stata4_113", "stata4_114", "stata4_115", "stata4_117"]
    )
    def test_read_dta4(self, file, datapath):
        file = datapath("io", "data", "stata", f"{file}.dta")
        parsed = self.read_dta(file)

        expected = DataFrame.from_records(
            [
                ["one", "ten", "one", "one", "one"],
                ["two", "nine", "two", "two", "two"],
                ["three", "eight", "three", "three", "three"],
                ["four", "seven", 4, "four", "four"],
                ["five", "six", 5, np.nan, "five"],
                ["six", "five", 6, np.nan, "six"],
                ["seven", "four", 7, np.nan, "seven"],
                ["eight", "three", 8, np.nan, "eight"],
                ["nine", "two", 9, np.nan, "nine"],
                ["ten", "one", "ten", np.nan, "ten"],
            ],
            columns=[
                "fully_labeled",
                "fully_labeled2",
                "incompletely_labeled",
                "labeled_with_missings",
                "float_labelled",
            ],
        )

        # these are all categoricals
        for col in expected:
            orig = expected[col].copy()

            categories = np.asarray(expected["fully_labeled"][orig.notna()])
            if col == "incompletely_labeled":
                categories = orig

            cat = orig.astype("category")._values
            cat = cat.set_categories(categories, ordered=True)
            cat.categories.rename(None, inplace=True)

            expected[col] = cat

        # stata doesn't save .category metadata
        tm.assert_frame_equal(parsed, expected)

    # File containing strls
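    # (strLs, introduced in dta format 117 / Stata 13, are variable-length
    # strings stored in a separate block of the file, unlike the fixed-width
    # str# columns used by older formats)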
    def test_read_dta12(self, datapath):
        parsed_117 = self.read_dta(datapath("io", "data", "stata", "stata12_117.dta"))
        expected = DataFrame.from_records(
            [
                [1, "abc", "abcdefghi"],
                [3, "cba", "qwertywertyqwerty"],
                [93, "", "strl"],
            ],
            columns=["x", "y", "z"],
        )

        tm.assert_frame_equal(parsed_117, expected, check_dtype=False)

    def test_read_dta18(self, datapath):
        parsed_118 = self.read_dta(datapath("io", "data", "stata", "stata14_118.dta"))
        parsed_118["Bytes"] = parsed_118["Bytes"].astype("O")
        expected = DataFrame.from_records(
            [
                ["Cat", "Bogota", "Bogotá", 1, 1.0, "option b Ünicode", 1.0],
                ["Dog", "Boston", "Uzunköprü", np.nan, np.nan, np.nan, np.nan],
                ["Plane", "Rome", "Tromsø", 0, 0.0, "option a", 0.0],
                ["Potato", "Tokyo", "Elâzığ", -4, 4.0, 4, 4],
                ["", "", "", 0, 0.3332999, "option a", 1 / 3.0],
            ],
            columns=[
                "Things",
                "Cities",
                "Unicode_Cities_Strl",
                "Ints",
                "Floats",
                "Bytes",
                "Longs",
            ],
        )
        expected["Floats"] = expected["Floats"].astype(np.float32)
        for col in parsed_118.columns:
            tm.assert_almost_equal(parsed_118[col], expected[col])

        with StataReader(datapath("io", "data", "stata", "stata14_118.dta")) as rdr:
            vl = rdr.variable_labels()
            vl_expected = {
                "Unicode_Cities_Strl": "Here are some strls with Ünicode chars",
                "Longs": "long data",
                "Things": "Here are some things",
                "Bytes": "byte data",
                "Ints": "int data",
                "Cities": "Here are some cities",
                "Floats": "float data",
            }
            tm.assert_dict_equal(vl, vl_expected)
            assert rdr.data_label == "This is a Ünicode data label"

    def test_read_write_dta5(self):
        original = DataFrame(
            [(np.nan, np.nan, np.nan, np.nan, np.nan)],
            columns=["float_miss", "double_miss", "byte_miss", "int_miss", "long_miss"],
        )
        original.index.name = "index"

        with tm.ensure_clean() as path:
            original.to_stata(path, convert_dates=None)
            written_and_read_again = self.read_dta(path)

        expected = original.copy()
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)

    def test_write_dta6(self, datapath):
        original = self.read_csv(datapath("io", "data", "stata", "stata3.csv"))
        original.index.name = "index"
        original.index = original.index.astype(np.int32)
        original["year"] = original["year"].astype(np.int32)
        original["quarter"] = original["quarter"].astype(np.int32)

        with tm.ensure_clean() as path:
            original.to_stata(path, convert_dates=None)
            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(
                written_and_read_again.set_index("index"),
                original,
                check_index_type=False,
            )

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_read_write_dta10(self, version):
        original = DataFrame(
            data=[["string", "object", 1, 1.1, np.datetime64("2003-12-25")]],
            columns=["string", "object", "integer", "floating", "datetime"],
        )
        original["object"] = Series(original["object"], dtype=object)
        original.index.name = "index"
        original.index = original.index.astype(np.int32)
        original["integer"] = original["integer"].astype(np.int32)

        with tm.ensure_clean() as path:
            original.to_stata(path, convert_dates={"datetime": "tc"}, version=version)
            written_and_read_again = self.read_dta(path)
            # original.index is np.int32, read index is np.int64
            tm.assert_frame_equal(
                written_and_read_again.set_index("index"),
                original,
                check_index_type=False,
            )

    def test_stata_doc_examples(self):
        with tm.ensure_clean() as path:
            df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
            df.to_stata(path)

    def test_write_preserves_original(self):
        # 9795
        np.random.seed(423)
        df = DataFrame(np.random.randn(5, 4), columns=list("abcd"))
        df.loc[2, "a":"c"] = np.nan
        df_copy = df.copy()
        with tm.ensure_clean() as path:
            df.to_stata(path, write_index=False)
        tm.assert_frame_equal(df, df_copy)

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_encoding(self, version, datapath):
        # GH 4626, proper encoding handling
        raw = read_stata(datapath("io", "data", "stata", "stata1_encoding.dta"))
        encoded = read_stata(datapath("io", "data", "stata", "stata1_encoding.dta"))
        result = encoded.kreis1849[0]

        expected = raw.kreis1849[0]
        assert result == expected
        assert isinstance(result, str)

        with tm.ensure_clean() as path:
            encoded.to_stata(path, write_index=False, version=version)
            reread_encoded = read_stata(path)
            tm.assert_frame_equal(encoded, reread_encoded)

    def test_read_write_dta11(self):
        original = DataFrame(
            [(1, 2, 3, 4)],
            columns=[
                "good",
                "b\u00E4d",
                "8number",
                "astringwithmorethan32characters______",
            ],
        )
        formatted = DataFrame(
            [(1, 2, 3, 4)],
            columns=["good", "b_d", "_8number", "astringwithmorethan32characters_"],
        )
        formatted.index.name = "index"
        formatted = formatted.astype(np.int32)

        with tm.ensure_clean() as path:
            with tm.assert_produces_warning(InvalidColumnName):
                original.to_stata(path, convert_dates=None)

            written_and_read_again = self.read_dta(path)

        expected = formatted.copy()
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_read_write_dta12(self, version):
        original = DataFrame(
            [(1, 2, 3, 4, 5, 6)],
            columns=[
                "astringwithmorethan32characters_1",
                "astringwithmorethan32characters_2",
                "+",
                "-",
                "short",
                "delete",
            ],
        )
        formatted = DataFrame(
            [(1, 2, 3, 4, 5, 6)],
            columns=[
                "astringwithmorethan32characters_",
                "_0astringwithmorethan32character",
                "_",
                "_1_",
                "_short",
                "_delete",
            ],
        )
        formatted.index.name = "index"
        formatted = formatted.astype(np.int32)

        with tm.ensure_clean() as path:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always", InvalidColumnName)
                original.to_stata(path, convert_dates=None, version=version)
                # should get a warning for that format.
                assert len(w) == 1

            written_and_read_again = self.read_dta(path)

        expected = formatted.copy()
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)

    def test_read_write_dta13(self):
        s1 = Series(2**9, dtype=np.int16)
        s2 = Series(2**17, dtype=np.int32)
        s3 = Series(2**33, dtype=np.int64)
        original = DataFrame({"int16": s1, "int32": s2, "int64": s3})
        original.index.name = "index"

        formatted = original
        formatted["int64"] = formatted["int64"].astype(np.float64)

        with tm.ensure_clean() as path:
            original.to_stata(path)
            written_and_read_again = self.read_dta(path)

        expected = formatted.copy()
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    @pytest.mark.parametrize(
        "file", ["stata5_113", "stata5_114", "stata5_115", "stata5_117"]
    )
    def test_read_write_reread_dta14(self, file, parsed_114, version, datapath):
        file = datapath("io", "data", "stata", f"{file}.dta")
        parsed = self.read_dta(file)
        parsed.index.name = "index"

        tm.assert_frame_equal(parsed_114, parsed)

        with tm.ensure_clean() as path:
            parsed_114.to_stata(path, convert_dates={"date_td": "td"}, version=version)
            written_and_read_again = self.read_dta(path)

        expected = parsed_114.copy()
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)

    @pytest.mark.parametrize(
        "file", ["stata6_113", "stata6_114", "stata6_115", "stata6_117"]
    )
    def test_read_write_reread_dta15(self, file, datapath):
        expected = self.read_csv(datapath("io", "data", "stata", "stata6.csv"))
        expected["byte_"] = expected["byte_"].astype(np.int8)
        expected["int_"] = expected["int_"].astype(np.int16)
        expected["long_"] = expected["long_"].astype(np.int32)
        expected["float_"] = expected["float_"].astype(np.float32)
        expected["double_"] = expected["double_"].astype(np.float64)
        expected["date_td"] = expected["date_td"].apply(
            datetime.strptime, args=("%Y-%m-%d",)
        )

        file = datapath("io", "data", "stata", f"{file}.dta")
        parsed = self.read_dta(file)

        tm.assert_frame_equal(expected, parsed)
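
    # Stata stores a file-creation time_stamp ("%d %b %Y %H:%M") and an
    # optional data_label string in the .dta header; to_stata accepts only a
    # datetime for time_stamp, which the two tests below exercise.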
    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_timestamp_and_label(self, version):
        original = DataFrame([(1,)], columns=["variable"])
        time_stamp = datetime(2000, 2, 29, 14, 21)
        data_label = "This is a data file."
        with tm.ensure_clean() as path:
            original.to_stata(
                path, time_stamp=time_stamp, data_label=data_label, version=version
            )

            with StataReader(path) as reader:
                assert reader.time_stamp == "29 Feb 2000 14:21"
                assert reader.data_label == data_label

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_invalid_timestamp(self, version):
        original = DataFrame([(1,)], columns=["variable"])
        time_stamp = "01 Jan 2000, 00:00:00"
        with tm.ensure_clean() as path:
            msg = "time_stamp should be datetime type"
            with pytest.raises(ValueError, match=msg):
                original.to_stata(path, time_stamp=time_stamp, version=version)
            assert not os.path.isfile(path)

    def test_numeric_column_names(self):
        original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
        original.index.name = "index"
        with tm.ensure_clean() as path:
            # should get a warning for that format.
            with tm.assert_produces_warning(InvalidColumnName):
                original.to_stata(path)

            written_and_read_again = self.read_dta(path)

        written_and_read_again = written_and_read_again.set_index("index")
        columns = list(written_and_read_again.columns)
        convert_col_name = lambda x: int(x[1])
        written_and_read_again.columns = map(convert_col_name, columns)

        expected = original.copy()
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(expected, written_and_read_again)

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_nan_to_missing_value(self, version):
        s1 = Series(np.arange(4.0), dtype=np.float32)
        s2 = Series(np.arange(4.0), dtype=np.float64)
        s1[::2] = np.nan
        s2[1::2] = np.nan
        original = DataFrame({"s1": s1, "s2": s2})
        original.index.name = "index"

        with tm.ensure_clean() as path:
            original.to_stata(path, version=version)
            written_and_read_again = self.read_dta(path)

        written_and_read_again = written_and_read_again.set_index("index")
        expected = original.copy()
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(written_and_read_again, expected)

    def test_no_index(self):
        columns = ["x", "y"]
        original = DataFrame(np.reshape(np.arange(10.0), (5, 2)), columns=columns)
        original.index.name = "index_not_written"
        with tm.ensure_clean() as path:
            original.to_stata(path, write_index=False)
            written_and_read_again = self.read_dta(path)
            with pytest.raises(KeyError, match=original.index.name):
                written_and_read_again["index_not_written"]

    def test_string_no_dates(self):
        s1 = Series(["a", "A longer string"])
        s2 = Series([1.0, 2.0], dtype=np.float64)
        original = DataFrame({"s1": s1, "s2": s2})
        original.index.name = "index"

        with tm.ensure_clean() as path:
            original.to_stata(path)
            written_and_read_again = self.read_dta(path)

        expected = original.copy()
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)

    def test_large_value_conversion(self):
        s0 = Series([1, 99], dtype=np.int8)
        s1 = Series([1, 127], dtype=np.int8)
        s2 = Series([1, 2**15 - 1], dtype=np.int16)
        s3 = Series([1, 2**63 - 1], dtype=np.int64)
        original = DataFrame({"s0": s0, "s1": s1, "s2": s2, "s3": s3})
        original.index.name = "index"

        with tm.ensure_clean() as path:
            with tm.assert_produces_warning(PossiblePrecisionLoss):
                original.to_stata(path)

            written_and_read_again = self.read_dta(path)

        modified = original.copy()
        modified["s1"] = Series(modified["s1"], dtype=np.int16)
        modified["s2"] = Series(modified["s2"], dtype=np.int32)
        modified["s3"] = Series(modified["s3"], dtype=np.float64)
        modified.index = original.index.astype(np.int32)
        tm.assert_frame_equal(written_and_read_again.set_index("index"), modified)

    def test_dates_invalid_column(self):
        original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
        original.index.name = "index"
        with tm.ensure_clean() as path:
            with tm.assert_produces_warning(InvalidColumnName):
                original.to_stata(path, convert_dates={0: "tc"})

            written_and_read_again = self.read_dta(path)

        modified = original.copy()
        modified.columns = ["_0"]
        modified.index = original.index.astype(np.int32)
        tm.assert_frame_equal(written_and_read_again.set_index("index"), modified)

    def test_105(self, datapath):
        # Data obtained from:
        # http://go.worldbank.org/ZXY29PVJ21
        dpath = datapath("io", "data", "stata", "S4_EDUC1.dta")
        df = read_stata(dpath)
        df0 = [[1, 1, 3, -2], [2, 1, 2, -2], [4, 1, 1, -2]]
        df0 = DataFrame(df0)
        df0.columns = ["clustnum", "pri_schl", "psch_num", "psch_dis"]
        df0["clustnum"] = df0["clustnum"].astype(np.int16)
        df0["pri_schl"] = df0["pri_schl"].astype(np.int8)
        df0["psch_num"] = df0["psch_num"].astype(np.int8)
        df0["psch_dis"] = df0["psch_dis"].astype(np.float32)
        tm.assert_frame_equal(df.head(3), df0)

    def test_value_labels_old_format(self, datapath):
        # GH 19417
        #
        # Test that value_labels() returns an empty dict if the file format
        # predates supporting value labels.
        dpath = datapath("io", "data", "stata", "S4_EDUC1.dta")
        with StataReader(dpath) as reader:
            assert reader.value_labels() == {}

    def test_date_export_formats(self):
        columns = ["tc", "td", "tw", "tm", "tq", "th", "ty"]
        conversions = {c: c for c in columns}
        data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
        original = DataFrame([data], columns=columns)
        original.index.name = "index"
        expected_values = [
            datetime(2006, 11, 20, 23, 13, 20),  # Time
            datetime(2006, 11, 20),  # Day
            datetime(2006, 11, 19),  # Week
            datetime(2006, 11, 1),  # Month
            datetime(2006, 10, 1),  # Quarter year
            datetime(2006, 7, 1),  # Half year
            datetime(2006, 1, 1),  # Year
        ]
        expected = DataFrame(
            [expected_values],
            index=pd.Index([0], dtype=np.int32, name="index"),
            columns=columns,
        )

        with tm.ensure_clean() as path:
            original.to_stata(path, convert_dates=conversions)
            written_and_read_again = self.read_dta(path)

        tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)

    def test_write_missing_strings(self):
        original = DataFrame([["1"], [None]], columns=["foo"])

        expected = DataFrame(
            [["1"], [""]],
            index=pd.Index([0, 1], dtype=np.int32, name="index"),
            columns=["foo"],
        )

        with tm.ensure_clean() as path:
            original.to_stata(path)
            written_and_read_again = self.read_dta(path)

        tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
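
    # Stata has no bool or unsigned integer types: the writer maps each such
    # column onto the smallest signed type that can hold its values
    # (bool -> int8), falling back to float64 for uint32 values that would
    # overflow int32, as expected_types below verifies.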
    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    @pytest.mark.parametrize("byteorder", [">", "<"])
    def test_bool_uint(self, byteorder, version):
        s0 = Series([0, 1, True], dtype=np.bool_)
        s1 = Series([0, 1, 100], dtype=np.uint8)
        s2 = Series([0, 1, 255], dtype=np.uint8)
        s3 = Series([0, 1, 2**15 - 100], dtype=np.uint16)
        s4 = Series([0, 1, 2**16 - 1], dtype=np.uint16)
        s5 = Series([0, 1, 2**31 - 100], dtype=np.uint32)
        s6 = Series([0, 1, 2**32 - 1], dtype=np.uint32)

        original = DataFrame(
            {"s0": s0, "s1": s1, "s2": s2, "s3": s3, "s4": s4, "s5": s5, "s6": s6}
        )
        original.index.name = "index"
        expected = original.copy()
        expected.index = original.index.astype(np.int32)
        expected_types = (
            np.int8,
            np.int8,
            np.int16,
            np.int16,
            np.int32,
            np.int32,
            np.float64,
        )
        for c, t in zip(expected.columns, expected_types):
            expected[c] = expected[c].astype(t)

        with tm.ensure_clean() as path:
            original.to_stata(path, byteorder=byteorder, version=version)
            written_and_read_again = self.read_dta(path)

        written_and_read_again = written_and_read_again.set_index("index")
        tm.assert_frame_equal(written_and_read_again, expected)

    def test_variable_labels(self, datapath):
        with StataReader(datapath("io", "data", "stata", "stata7_115.dta")) as rdr:
            sr_115 = rdr.variable_labels()
        with StataReader(datapath("io", "data", "stata", "stata7_117.dta")) as rdr:
            sr_117 = rdr.variable_labels()
        keys = ("var1", "var2", "var3")
        labels = ("label1", "label2", "label3")
        for k, v in sr_115.items():
            assert k in sr_117
            assert v == sr_117[k]
            assert k in keys
            assert v in labels
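
    # The writer should pick the narrowest fixed-width string type (str1 to
    # str244) per column; the test below checks that the declared format
    # width and type both equal the longest string actually stored.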
    def test_minimal_size_col(self):
        str_lens = (1, 100, 244)
        s = {}
        for str_len in str_lens:
            s["s" + str(str_len)] = Series(
                ["a" * str_len, "b" * str_len, "c" * str_len]
            )
        original = DataFrame(s)
        with tm.ensure_clean() as path:
            original.to_stata(path, write_index=False)

            with StataReader(path) as sr:
                sr._ensure_open()  # The `_*list` variables are initialized here
                for variable, fmt, typ in zip(sr._varlist, sr._fmtlist, sr._typlist):
                    assert int(variable[1:]) == int(fmt[1:-1])
                    assert int(variable[1:]) == typ

    def test_excessively_long_string(self):
        str_lens = (1, 244, 500)
        s = {}
        for str_len in str_lens:
            s["s" + str(str_len)] = Series(
                ["a" * str_len, "b" * str_len, "c" * str_len]
            )
        original = DataFrame(s)
        msg = (
            r"Fixed width strings in Stata \.dta files are limited to 244 "
            r"\(or fewer\)\ncharacters\. Column 's500' does not satisfy "
            r"this restriction\. Use the\n'version=117' parameter to write "
            r"the newer \(Stata 13 and later\) format\."
        )
        with pytest.raises(ValueError, match=msg):
            with tm.ensure_clean() as path:
                original.to_stata(path)
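
    # Stata encodes missing numeric values at the top of each type's range:
    # the 27 sentinels ".", ".a", ..., ".z" sit immediately above the largest
    # valid value (VALID_RANGE), which is the layout StataMissingValue
    # decodes below.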
    def test_missing_value_generator(self):
        types = ("b", "h", "l")
        df = DataFrame([[0.0]], columns=["float_"])
        with tm.ensure_clean() as path:
            df.to_stata(path)
            with StataReader(path) as rdr:
                valid_range = rdr.VALID_RANGE
        expected_values = ["." + chr(97 + i) for i in range(26)]
        expected_values.insert(0, ".")
        for t in types:
            offset = valid_range[t][1]
            for i in range(0, 27):
                val = StataMissingValue(offset + 1 + i)
                assert val.string == expected_values[i]

        # Test extremes for floats
        val = StataMissingValue(struct.unpack("<f", b"\x00\x00\x00\x7f")[0])
        assert val.string == "."
        val = StataMissingValue(struct.unpack("<f", b"\x00\xd0\x00\x7f")[0])
        assert val.string == ".z"

        # Test extremes for doubles
        val = StataMissingValue(
            struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
        )
        assert val.string == "."
        val = StataMissingValue(
            struct.unpack("<d", b"\x00\x00\x00\x00\x00\x1a\xe0\x7f")[0]
        )
        assert val.string == ".z"

    @pytest.mark.parametrize("file", ["stata8_113", "stata8_115", "stata8_117"])
    def test_missing_value_conversion(self, file, datapath):
        columns = ["int8_", "int16_", "int32_", "float32_", "float64_"]
        smv = StataMissingValue(101)
        keys = sorted(smv.MISSING_VALUES.keys())
        data = []
        for i in range(27):
            row = [StataMissingValue(keys[i + (j * 27)]) for j in range(5)]
            data.append(row)
        expected = DataFrame(data, columns=columns)

        parsed = read_stata(
            datapath("io", "data", "stata", f"{file}.dta"), convert_missing=True
        )
        tm.assert_frame_equal(parsed, expected)
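
    # Stata dates count from a 1960-01-01 epoch (e.g. %tc is milliseconds
    # since then), so valid Stata dates can fall outside the datetime64[ns]
    # range; the expectations below are built from datetime objects and then
    # patched for the coarser weekly/monthly/quarterly/half-year formats.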
    def test_big_dates(self, datapath):
        yr = [1960, 2000, 9999, 100, 2262, 1677]
        mo = [1, 1, 12, 1, 4, 9]
        dd = [1, 1, 31, 1, 22, 23]
        hr = [0, 0, 23, 0, 0, 0]
        mm = [0, 0, 59, 0, 0, 0]
        ss = [0, 0, 59, 0, 0, 0]
        expected = []
        for year, month, day, hour, minute, second in zip(yr, mo, dd, hr, mm, ss):
            row = []
            for j in range(7):
                if j == 0:
                    row.append(datetime(year, month, day, hour, minute, second))
                elif j == 6:
                    row.append(datetime(year, 1, 1))
                else:
                    row.append(datetime(year, month, day))
            expected.append(row)

        expected.append([pd.NaT] * 7)
        columns = [
            "date_tc",
            "date_td",
            "date_tw",
            "date_tm",
            "date_tq",
            "date_th",
            "date_ty",
        ]

        # Fixes for weekly, quarterly, half, year
        expected[2][2] = datetime(9999, 12, 24)
        expected[2][3] = datetime(9999, 12, 1)
        expected[2][4] = datetime(9999, 10, 1)
        expected[2][5] = datetime(9999, 7, 1)
        expected[4][2] = datetime(2262, 4, 16)
        expected[4][3] = expected[4][4] = datetime(2262, 4, 1)
        expected[4][5] = expected[4][6] = datetime(2262, 1, 1)
        expected[5][2] = expected[5][3] = expected[5][4] = datetime(1677, 10, 1)
        expected[5][5] = expected[5][6] = datetime(1678, 1, 1)

        expected = DataFrame(expected, columns=columns, dtype=object)
        parsed_115 = read_stata(datapath("io", "data", "stata", "stata9_115.dta"))
        parsed_117 = read_stata(datapath("io", "data", "stata", "stata9_117.dta"))
        tm.assert_frame_equal(expected, parsed_115, check_datetimelike_compat=True)
        tm.assert_frame_equal(expected, parsed_117, check_datetimelike_compat=True)

        date_conversion = {c: c[-2:] for c in columns}
        with tm.ensure_clean() as path:
            expected.index.name = "index"
            expected.to_stata(path, convert_dates=date_conversion)
            written_and_read_again = self.read_dta(path)

        tm.assert_frame_equal(
            written_and_read_again.set_index("index"),
            expected.set_index(expected.index.astype(np.int32)),
            check_datetimelike_compat=True,
        )

    def test_dtype_conversion(self, datapath):
        expected = self.read_csv(datapath("io", "data", "stata", "stata6.csv"))
        expected["byte_"] = expected["byte_"].astype(np.int8)
        expected["int_"] = expected["int_"].astype(np.int16)
        expected["long_"] = expected["long_"].astype(np.int32)
        expected["float_"] = expected["float_"].astype(np.float32)
        expected["double_"] = expected["double_"].astype(np.float64)
        expected["date_td"] = expected["date_td"].apply(
            datetime.strptime, args=("%Y-%m-%d",)
        )

        no_conversion = read_stata(
            datapath("io", "data", "stata", "stata6_117.dta"), convert_dates=True
        )
        tm.assert_frame_equal(expected, no_conversion)

        conversion = read_stata(
            datapath("io", "data", "stata", "stata6_117.dta"),
            convert_dates=True,
            preserve_dtypes=False,
        )

        # read_csv types are the same
        expected = self.read_csv(datapath("io", "data", "stata", "stata6.csv"))
        expected["date_td"] = expected["date_td"].apply(
            datetime.strptime, args=("%Y-%m-%d",)
        )

        tm.assert_frame_equal(expected, conversion)

    def test_drop_column(self, datapath):
        expected = self.read_csv(datapath("io", "data", "stata", "stata6.csv"))
        expected["byte_"] = expected["byte_"].astype(np.int8)
        expected["int_"] = expected["int_"].astype(np.int16)
        expected["long_"] = expected["long_"].astype(np.int32)
        expected["float_"] = expected["float_"].astype(np.float32)
        expected["double_"] = expected["double_"].astype(np.float64)
        expected["date_td"] = expected["date_td"].apply(
            datetime.strptime, args=("%Y-%m-%d",)
        )

        columns = ["byte_", "int_", "long_"]
        expected = expected[columns]
        dropped = read_stata(
            datapath("io", "data", "stata", "stata6_117.dta"),
            convert_dates=True,
            columns=columns,
        )
        tm.assert_frame_equal(expected, dropped)

        # See PR 10757
        columns = ["int_", "long_", "byte_"]
        expected = expected[columns]
        reordered = read_stata(
            datapath("io", "data", "stata", "stata6_117.dta"),
            convert_dates=True,
            columns=columns,
        )
        tm.assert_frame_equal(expected, reordered)

        msg = "columns contains duplicate entries"
        with pytest.raises(ValueError, match=msg):
            columns = ["byte_", "byte_"]
            read_stata(
                datapath("io", "data", "stata", "stata6_117.dta"),
                convert_dates=True,
                columns=columns,
            )

        msg = "The following columns were not found in the Stata data set: not_found"
        with pytest.raises(ValueError, match=msg):
            columns = ["byte_", "int_", "long_", "not_found"]
            read_stata(
                datapath("io", "data", "stata", "stata6_117.dta"),
                convert_dates=True,
                columns=columns,
            )
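
    # Value-labeled Stata variables round-trip as pandas Categoricals: the
    # writer stores category strings as value labels over integer codes
    # (capped at 32,000 combined characters per variable), and non-string
    # categories trigger a ValueLabelTypeMismatch warning.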
    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    @pytest.mark.filterwarnings(
        "ignore:\\nStata value:pandas.io.stata.ValueLabelTypeMismatch"
    )
    def test_categorical_writing(self, version):
        original = DataFrame.from_records(
            [
                ["one", "ten", "one", "one", "one", 1],
                ["two", "nine", "two", "two", "two", 2],
                ["three", "eight", "three", "three", "three", 3],
                ["four", "seven", 4, "four", "four", 4],
                ["five", "six", 5, np.nan, "five", 5],
                ["six", "five", 6, np.nan, "six", 6],
                ["seven", "four", 7, np.nan, "seven", 7],
                ["eight", "three", 8, np.nan, "eight", 8],
                ["nine", "two", 9, np.nan, "nine", 9],
                ["ten", "one", "ten", np.nan, "ten", 10],
            ],
            columns=[
                "fully_labeled",
                "fully_labeled2",
                "incompletely_labeled",
                "labeled_with_missings",
                "float_labelled",
                "unlabeled",
            ],
        )
        expected = original.copy()

        # these are all categoricals
        original = pd.concat(
            [original[col].astype("category") for col in original], axis=1
        )
        expected.index = expected.index.set_names("index").astype(np.int32)

        expected["incompletely_labeled"] = expected["incompletely_labeled"].apply(str)
        expected["unlabeled"] = expected["unlabeled"].apply(str)
        for col in expected:
            orig = expected[col].copy()

            cat = orig.astype("category")._values
            cat = cat.as_ordered()
            if col == "unlabeled":
                cat = cat.set_categories(orig, ordered=True)

            cat.categories.rename(None, inplace=True)

            expected[col] = cat

        with tm.ensure_clean() as path:
            original.to_stata(path, version=version)
            written_and_read_again = self.read_dta(path)

        res = written_and_read_again.set_index("index")
        tm.assert_frame_equal(res, expected)

    def test_categorical_warnings_and_errors(self):
        # Warning for non-string labels
        # Error for labels too long
        original = DataFrame.from_records(
            [["a" * 10000], ["b" * 10000], ["c" * 10000], ["d" * 10000]],
            columns=["Too_long"],
        )

        original = pd.concat(
            [original[col].astype("category") for col in original], axis=1
        )
        with tm.ensure_clean() as path:
            msg = (
                "Stata value labels for a single variable must have "
                r"a combined length less than 32,000 characters\."
            )
            with pytest.raises(ValueError, match=msg):
                original.to_stata(path)

            original = DataFrame.from_records(
                [["a"], ["b"], ["c"], ["d"], [1]], columns=["Too_long"]
            )
            original = pd.concat(
                [original[col].astype("category") for col in original], axis=1
            )

            with tm.assert_produces_warning(ValueLabelTypeMismatch):
                original.to_stata(path)
                # should get a warning for mixed content

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_categorical_with_stata_missing_values(self, version):
        values = [["a" + str(i)] for i in range(120)]
        values.append([np.nan])
        original = DataFrame.from_records(values, columns=["many_labels"])
        original = pd.concat(
            [original[col].astype("category") for col in original], axis=1
        )
        original.index.name = "index"
        with tm.ensure_clean() as path:
            original.to_stata(path, version=version)
            written_and_read_again = self.read_dta(path)

        res = written_and_read_again.set_index("index")

        expected = original.copy()
        for col in expected:
            cat = expected[col]._values
            new_cats = cat.remove_unused_categories().categories
            cat = cat.set_categories(new_cats, ordered=True)
            expected[col] = cat
        expected.index = expected.index.astype(np.int32)

        tm.assert_frame_equal(res, expected)

    @pytest.mark.parametrize("file", ["stata10_115", "stata10_117"])
    def test_categorical_order(self, file, datapath):
        # Directly construct using expected codes
        # Format is is_cat, col_name, labels (in order), underlying data
        expected = [
            (True, "ordered", ["a", "b", "c", "d", "e"], np.arange(5)),
            (True, "reverse", ["a", "b", "c", "d", "e"], np.arange(5)[::-1]),
            (True, "noorder", ["a", "b", "c", "d", "e"], np.array([2, 1, 4, 0, 3])),
            (True, "floating", ["a", "b", "c", "d", "e"], np.arange(0, 5)),
            (True, "float_missing", ["a", "d", "e"], np.array([0, 1, 2, -1, -1])),
            (False, "nolabel", [1.0, 2.0, 3.0, 4.0, 5.0], np.arange(5)),
            (True, "int32_mixed", ["d", 2, "e", "b", "a"], np.arange(5)),
        ]
        cols = []
        for is_cat, col, labels, codes in expected:
            if is_cat:
                cols.append(
                    (col, pd.Categorical.from_codes(codes, labels, ordered=True))
                )
            else:
                cols.append((col, Series(labels, dtype=np.float32)))
        expected = DataFrame.from_dict(dict(cols))

        # Read with and without categoricals, ensure order is identical
        file = datapath("io", "data", "stata", f"{file}.dta")
        parsed = read_stata(file)
        tm.assert_frame_equal(expected, parsed)

        # Check identity of codes
        for col in expected:
            if is_categorical_dtype(expected[col].dtype):
                tm.assert_series_equal(expected[col].cat.codes, parsed[col].cat.codes)
                tm.assert_index_equal(
                    expected[col].cat.categories, parsed[col].cat.categories
                )

    @pytest.mark.parametrize("file", ["stata11_115", "stata11_117"])
    def test_categorical_sorting(self, file, datapath):
        parsed = read_stata(datapath("io", "data", "stata", f"{file}.dta"))

        # Sort based on codes, not strings
        parsed = parsed.sort_values("srh", na_position="first")

        # Don't sort index
        parsed.index = pd.RangeIndex(len(parsed))
        codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4]
        categories = ["Poor", "Fair", "Good", "Very good", "Excellent"]
        cat = pd.Categorical.from_codes(
            codes=codes, categories=categories, ordered=True
        )
        expected = Series(cat, name="srh")
        tm.assert_series_equal(expected, parsed["srh"])

    @pytest.mark.parametrize("file", ["stata10_115", "stata10_117"])
    def test_categorical_ordering(self, file, datapath):
        file = datapath("io", "data", "stata", f"{file}.dta")
        parsed = read_stata(file)

        parsed_unordered = read_stata(file, order_categoricals=False)
        for col in parsed:
            if not is_categorical_dtype(parsed[col].dtype):
                continue
            assert parsed[col].cat.ordered
            assert not parsed_unordered[col].cat.ordered
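
    # read_stata also supports incremental reads: iterator=True returns a
    # StataReader usable as a context manager exposing .read(nrows) and
    # .get_chunk(), while chunksize=n yields n-row DataFrames on iteration.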
    @pytest.mark.parametrize(
        "file",
        [
            "stata1_117",
            "stata2_117",
            "stata3_117",
            "stata4_117",
            "stata5_117",
            "stata6_117",
            "stata7_117",
            "stata8_117",
            "stata9_117",
            "stata10_117",
            "stata11_117",
        ],
    )
    @pytest.mark.parametrize("chunksize", [1, 2])
    @pytest.mark.parametrize("convert_categoricals", [False, True])
    @pytest.mark.parametrize("convert_dates", [False, True])
    def test_read_chunks_117(
        self, file, chunksize, convert_categoricals, convert_dates, datapath
    ):
        fname = datapath("io", "data", "stata", f"{file}.dta")

        with warnings.catch_warnings(record=True):
            warnings.simplefilter("always")
            parsed = read_stata(
                fname,
                convert_categoricals=convert_categoricals,
                convert_dates=convert_dates,
            )

        with read_stata(
            fname,
            iterator=True,
            convert_categoricals=convert_categoricals,
            convert_dates=convert_dates,
        ) as itr:
            pos = 0
            for j in range(5):
                with warnings.catch_warnings(record=True):
                    warnings.simplefilter("always")
                    try:
                        chunk = itr.read(chunksize)
                    except StopIteration:
                        break
                from_frame = parsed.iloc[pos : pos + chunksize, :].copy()
                from_frame = self._convert_categorical(from_frame)
                tm.assert_frame_equal(
                    from_frame, chunk, check_dtype=False, check_datetimelike_compat=True
                )
                pos += chunksize

    @staticmethod
    def _convert_categorical(from_frame: DataFrame) -> DataFrame:
        """
        Emulate the categorical casting behavior we expect from roundtripping.
        """
        for col in from_frame:
            ser = from_frame[col]
            if is_categorical_dtype(ser.dtype):
                cat = ser._values.remove_unused_categories()
                if cat.categories.dtype == object:
                    categories = pd.Index._with_infer(cat.categories._values)
                    cat = cat.set_categories(categories)
                from_frame[col] = cat
        return from_frame

    def test_iterator(self, datapath):
        fname = datapath("io", "data", "stata", "stata3_117.dta")

        parsed = read_stata(fname)

        with read_stata(fname, iterator=True) as itr:
            chunk = itr.read(5)
            tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)

        with read_stata(fname, chunksize=5) as itr:
            chunk = list(itr)
            tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0])

        with read_stata(fname, iterator=True) as itr:
            chunk = itr.get_chunk(5)
            tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)

        with read_stata(fname, chunksize=5) as itr:
            chunk = itr.get_chunk()
            tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)

        # GH12153
        with read_stata(fname, chunksize=4) as itr:
            from_chunks = pd.concat(itr)
        tm.assert_frame_equal(parsed, from_chunks)

    @pytest.mark.parametrize(
        "file",
        [
            "stata2_115",
            "stata3_115",
            "stata4_115",
            "stata5_115",
            "stata6_115",
            "stata7_115",
            "stata8_115",
            "stata9_115",
            "stata10_115",
            "stata11_115",
        ],
    )
    @pytest.mark.parametrize("chunksize", [1, 2])
    @pytest.mark.parametrize("convert_categoricals", [False, True])
    @pytest.mark.parametrize("convert_dates", [False, True])
    def test_read_chunks_115(
        self, file, chunksize, convert_categoricals, convert_dates, datapath
    ):
        fname = datapath("io", "data", "stata", f"{file}.dta")

        # Read the whole file
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("always")
            parsed = read_stata(
                fname,
                convert_categoricals=convert_categoricals,
                convert_dates=convert_dates,
            )

        # Compare to what we get when reading by chunk
        with read_stata(
            fname,
            iterator=True,
            convert_dates=convert_dates,
            convert_categoricals=convert_categoricals,
        ) as itr:
            pos = 0
            for j in range(5):
                with warnings.catch_warnings(record=True):
                    warnings.simplefilter("always")
                    try:
                        chunk = itr.read(chunksize)
                    except StopIteration:
                        break
                from_frame = parsed.iloc[pos : pos + chunksize, :].copy()
                from_frame = self._convert_categorical(from_frame)
                tm.assert_frame_equal(
                    from_frame, chunk, check_dtype=False, check_datetimelike_compat=True
                )
                pos += chunksize

    def test_read_chunks_columns(self, datapath):
        fname = datapath("io", "data", "stata", "stata3_117.dta")
        columns = ["quarter", "cpi", "m1"]
        chunksize = 2

        parsed = read_stata(fname, columns=columns)
        with read_stata(fname, iterator=True) as itr:
            pos = 0
            for j in range(5):
                chunk = itr.read(chunksize, columns=columns)
                if chunk is None:
                    break
                from_frame = parsed.iloc[pos : pos + chunksize, :]
                tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
                pos += chunksize
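
    # variable_labels maps column names to descriptive strings stored in the
    # .dta header; labels are limited to 80 characters and, for versions
    # before 118, to characters encodable in Latin-1.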
    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_write_variable_labels(self, version, mixed_frame):
        # GH 13631, add support for writing variable labels
        mixed_frame.index.name = "index"
        variable_labels = {"a": "City Rank", "b": "City Exponent", "c": "City"}
        with tm.ensure_clean() as path:
            mixed_frame.to_stata(path, variable_labels=variable_labels, version=version)
            with StataReader(path) as sr:
                read_labels = sr.variable_labels()
            expected_labels = {
                "index": "",
                "a": "City Rank",
                "b": "City Exponent",
                "c": "City",
            }
            assert read_labels == expected_labels

        variable_labels["index"] = "The Index"
        with tm.ensure_clean() as path:
            mixed_frame.to_stata(path, variable_labels=variable_labels, version=version)
            with StataReader(path) as sr:
                read_labels = sr.variable_labels()
            assert read_labels == variable_labels

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_invalid_variable_labels(self, version, mixed_frame):
        mixed_frame.index.name = "index"
        variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"}
        with tm.ensure_clean() as path:
            msg = "Variable labels must be 80 characters or fewer"
            with pytest.raises(ValueError, match=msg):
                mixed_frame.to_stata(
                    path, variable_labels=variable_labels, version=version
                )

    @pytest.mark.parametrize("version", [114, 117])
    def test_invalid_variable_label_encoding(self, version, mixed_frame):
        mixed_frame.index.name = "index"
        variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"}
        variable_labels["a"] = "invalid character Œ"
        with tm.ensure_clean() as path:
            with pytest.raises(
                ValueError, match="Variable labels must contain only characters"
            ):
                mixed_frame.to_stata(
                    path, variable_labels=variable_labels, version=version
                )

    def test_write_variable_label_errors(self, mixed_frame):
        values = ["\u03A1", "\u0391", "\u039D", "\u0394", "\u0391", "\u03A3"]

        variable_labels_utf8 = {
            "a": "City Rank",
            "b": "City Exponent",
            "c": "".join(values),
        }

        msg = (
            "Variable labels must contain only characters that can be "
            "encoded in Latin-1"
        )
        with pytest.raises(ValueError, match=msg):
            with tm.ensure_clean() as path:
                mixed_frame.to_stata(path, variable_labels=variable_labels_utf8)

        variable_labels_long = {
            "a": "City Rank",
            "b": "City Exponent",
            "c": "A very, very, very long variable label "
            "that is too long for Stata which means "
            "that it has more than 80 characters",
        }

        msg = "Variable labels must be 80 characters or fewer"
        with pytest.raises(ValueError, match=msg):
            with tm.ensure_clean() as path:
                mixed_frame.to_stata(path, variable_labels=variable_labels_long)
  1188. def test_default_date_conversion(self):
  1189. # GH 12259
  1190. dates = [
  1191. dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
  1192. dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
  1193. dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
  1194. ]
  1195. original = DataFrame(
  1196. {
  1197. "nums": [1.0, 2.0, 3.0],
  1198. "strs": ["apple", "banana", "cherry"],
  1199. "dates": dates,
  1200. }
  1201. )
  1202. with tm.ensure_clean() as path:
  1203. original.to_stata(path, write_index=False)
  1204. reread = read_stata(path, convert_dates=True)
  1205. tm.assert_frame_equal(original, reread)
  1206. original.to_stata(path, write_index=False, convert_dates={"dates": "tc"})
  1207. direct = read_stata(path, convert_dates=True)
  1208. tm.assert_frame_equal(reread, direct)
  1209. dates_idx = original.columns.tolist().index("dates")
  1210. original.to_stata(path, write_index=False, convert_dates={dates_idx: "tc"})
  1211. direct = read_stata(path, convert_dates=True)
  1212. tm.assert_frame_equal(reread, direct)
  1213. def test_unsupported_type(self):
  1214. original = DataFrame({"a": [1 + 2j, 2 + 4j]})
  1215. msg = "Data type complex128 not supported"
  1216. with pytest.raises(NotImplementedError, match=msg):
  1217. with tm.ensure_clean() as path:
  1218. original.to_stata(path)
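
    # Stata's leap-second-aware %tC format and timezone-aware datetimes are
    # both unsupported by the writer.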
    def test_unsupported_datetype(self):
        dates = [
            dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
            dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
            dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
        ]
        original = DataFrame(
            {
                "nums": [1.0, 2.0, 3.0],
                "strs": ["apple", "banana", "cherry"],
                "dates": dates,
            }
        )
        msg = "Format %tC not implemented"
        with pytest.raises(NotImplementedError, match=msg):
            with tm.ensure_clean() as path:
                original.to_stata(path, convert_dates={"dates": "tC"})

        dates = pd.date_range("1-1-1990", periods=3, tz="Asia/Hong_Kong")
        original = DataFrame(
            {
                "nums": [1.0, 2.0, 3.0],
                "strs": ["apple", "banana", "cherry"],
                "dates": dates,
            }
        )
        with pytest.raises(NotImplementedError, match="Data type datetime64"):
            with tm.ensure_clean() as path:
                original.to_stata(path)

    def test_repeated_column_labels(self, datapath):
        # GH 13923, 25772
        msg = """
Value labels for column ethnicsn are not unique. These cannot be converted to
pandas categoricals.

Either read the file with `convert_categoricals` set to False or use the
low level interface in `StataReader` to separately read the values and the
value_labels.

The repeated labels are:\n-+\nwolof
"""
        with pytest.raises(ValueError, match=msg):
            read_stata(
                datapath("io", "data", "stata", "stata15.dta"),
                convert_categoricals=True,
            )

    def test_stata_111(self, datapath):
        # 111 is an old version but still used by current versions of
        # SAS when exporting to Stata format. We do not know of any
        # online documentation for this version.
        df = read_stata(datapath("io", "data", "stata", "stata7_111.dta"))
        original = DataFrame(
            {
                "y": [1, 1, 1, 1, 1, 0, 0, np.nan, 0, 0],
                "x": [1, 2, 1, 3, np.nan, 4, 3, 5, 1, 6],
                "w": [2, np.nan, 5, 2, 4, 4, 3, 1, 2, 3],
                "z": ["a", "b", "c", "d", "e", "", "g", "h", "i", "j"],
            }
        )
        original = original[["y", "x", "w", "z"]]
        tm.assert_frame_equal(original, df)

    def test_out_of_range_double(self):
        # GH 14618
        df = DataFrame(
            {
                "ColumnOk": [0.0, np.finfo(np.double).eps, 4.49423283715579e307],
                "ColumnTooBig": [0.0, np.finfo(np.double).eps, np.finfo(np.double).max],
            }
        )
        msg = (
            r"Column ColumnTooBig has a maximum value \(.+\) outside the range "
            r"supported by Stata \(.+\)"
        )
        with pytest.raises(ValueError, match=msg):
            with tm.ensure_clean() as path:
                df.to_stata(path)
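
    # A float32 column whose maximum exceeds Stata's float range is upcast to
    # float64 on write rather than raising.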
    def test_out_of_range_float(self):
        original = DataFrame(
            {
                "ColumnOk": [
                    0.0,
                    np.finfo(np.float32).eps,
                    np.finfo(np.float32).max / 10.0,
                ],
                "ColumnTooBig": [
                    0.0,
                    np.finfo(np.float32).eps,
                    np.finfo(np.float32).max,
                ],
            }
        )
        original.index.name = "index"
        for col in original:
            original[col] = original[col].astype(np.float32)

        with tm.ensure_clean() as path:
            original.to_stata(path)
            reread = read_stata(path)

        original["ColumnTooBig"] = original["ColumnTooBig"].astype(np.float64)
        expected = original.copy()
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(reread.set_index("index"), expected)

    @pytest.mark.parametrize("infval", [np.inf, -np.inf])
    def test_inf(self, infval):
        # GH 45350
        df = DataFrame({"WithoutInf": [0.0, 1.0], "WithInf": [2.0, infval]})
        msg = (
            "Column WithInf contains infinity or -infinity"
            "which is outside the range supported by Stata."
        )
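        # NB: no space before "which"; the pattern appears to mirror the
        # adjacent-string concatenation in the message raised by to_stata.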
        with pytest.raises(ValueError, match=msg):
            with tm.ensure_clean() as path:
                df.to_stata(path)

    def test_path_pathlib(self):
        df = tm.makeDataFrame()
        df.index.name = "index"
        reader = lambda x: read_stata(x).set_index("index")
        result = tm.round_trip_pathlib(df.to_stata, reader)
        tm.assert_frame_equal(df, result)

    def test_pickle_path_localpath(self):
        df = tm.makeDataFrame()
        df.index.name = "index"
        reader = lambda x: read_stata(x).set_index("index")
        result = tm.round_trip_localpath(df.to_stata, reader)
        tm.assert_frame_equal(df, result)

    @pytest.mark.parametrize("write_index", [True, False])
    def test_value_labels_iterator(self, write_index):
        # GH 16923
        d = {"A": ["B", "E", "C", "A", "E"]}
        df = DataFrame(data=d)
        df["A"] = df["A"].astype("category")
        with tm.ensure_clean() as path:
            df.to_stata(path, write_index=write_index)
            with read_stata(path, iterator=True) as dta_iter:
                value_labels = dta_iter.value_labels()
        assert value_labels == {"A": {0: "A", 1: "B", 2: "C", 3: "E"}}

    def test_set_index(self):
        # GH 17328
        df = tm.makeDataFrame()
        df.index.name = "index"
        with tm.ensure_clean() as path:
            df.to_stata(path)
            reread = read_stata(path, index_col="index")
        tm.assert_frame_equal(df, reread)

    @pytest.mark.parametrize(
        "column", ["ms", "day", "week", "month", "qtr", "half", "yr"]
    )
    def test_date_parsing_ignores_format_details(self, column, datapath):
        # GH 17797
        #
        # Test that display formats are ignored when determining if a numeric
        # column is a date value.
        #
        # All date types are stored as numbers, and the format associated with
        # the column denotes both the type of the date and the display format.
        #
        # Stata supports 9 date types, each with distinct units. We test 7 of
        # the 9 types, ignoring %tC and %tb. %tC is a variant of %tc that
        # accounts for leap seconds and %tb relies on Stata's business calendar.
        df = read_stata(datapath("io", "data", "stata", "stata13_dates.dta"))
        unformatted = df.loc[0, column]
        formatted = df.loc[0, column + "_fmt"]
        assert unformatted == formatted
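
    # Round-trip every supported dtype through a format-117 writer, including
    # both natural and forced strLs.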
    def test_writer_117(self):
        original = DataFrame(
            data=[
                [
                    "string",
                    "object",
                    1,
                    1,
                    1,
                    1.1,
                    1.1,
                    np.datetime64("2003-12-25"),
                    "a",
                    "a" * 2045,
                    "a" * 5000,
                    "a",
                ],
                [
                    "string-1",
                    "object-1",
                    1,
                    1,
                    1,
                    1.1,
                    1.1,
                    np.datetime64("2003-12-26"),
                    "b",
                    "b" * 2045,
                    "",
                    "",
                ],
            ],
            columns=[
                "string",
                "object",
                "int8",
                "int16",
                "int32",
                "float32",
                "float64",
                "datetime",
                "s1",
                "s2045",
                "srtl",
                "forced_strl",
            ],
        )
        original["object"] = Series(original["object"], dtype=object)
        original["int8"] = Series(original["int8"], dtype=np.int8)
        original["int16"] = Series(original["int16"], dtype=np.int16)
        original["int32"] = original["int32"].astype(np.int32)
        original["float32"] = Series(original["float32"], dtype=np.float32)
        original.index.name = "index"
        original.index = original.index.astype(np.int32)
        copy = original.copy()
        with tm.ensure_clean() as path:
            original.to_stata(
                path,
                convert_dates={"datetime": "tc"},
                convert_strl=["forced_strl"],
                version=117,
            )
            written_and_read_again = self.read_dta(path)
            # original.index is np.int32, read index is np.int64
            tm.assert_frame_equal(
                written_and_read_again.set_index("index"),
                original,
                check_index_type=False,
            )
            tm.assert_frame_equal(original, copy)
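
    # Invalid column names are sanitized with a warning; convert_strl entries
    # refer to the original (pre-sanitization) names.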
    def test_convert_strl_name_swap(self):
        original = DataFrame(
            [["a" * 3000, "A", "apple"], ["b" * 1000, "B", "banana"]],
            columns=["long1" * 10, "long", 1],
        )
        original.index.name = "index"

        with tm.assert_produces_warning(InvalidColumnName):
            with tm.ensure_clean() as path:
                original.to_stata(path, convert_strl=["long", 1], version=117)
                reread = self.read_dta(path)
                reread = reread.set_index("index")
                reread.columns = original.columns
                tm.assert_frame_equal(reread, original, check_index_type=False)

    def test_invalid_date_conversion(self):
        # GH 12259
        dates = [
            dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
            dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
            dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
        ]
        original = DataFrame(
            {
                "nums": [1.0, 2.0, 3.0],
                "strs": ["apple", "banana", "cherry"],
                "dates": dates,
            }
        )
        with tm.ensure_clean() as path:
            msg = "convert_dates key must be a column or an integer"
            with pytest.raises(ValueError, match=msg):
                original.to_stata(path, convert_dates={"wrong_name": "tc"})

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_nonfile_writing(self, version):
        # GH 21041
        bio = io.BytesIO()
        df = tm.makeDataFrame()
        df.index.name = "index"
        with tm.ensure_clean() as path:
            df.to_stata(bio, version=version)
            bio.seek(0)
            with open(path, "wb") as dta:
                dta.write(bio.read())
            reread = read_stata(path, index_col="index")
        tm.assert_frame_equal(df, reread)

    def test_gzip_writing(self):
        # writing version 117 requires seek and cannot be used with gzip
        df = tm.makeDataFrame()
        df.index.name = "index"
        with tm.ensure_clean() as path:
            with gzip.GzipFile(path, "wb") as gz:
                df.to_stata(gz, version=114)
            with gzip.GzipFile(path, "rb") as gz:
                reread = read_stata(gz, index_col="index")
        tm.assert_frame_equal(df, reread)
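
    # Format 118 files store text as UTF-8; non-Latin strings should survive
    # reading.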
    def test_unicode_dta_118(self, datapath):
        unicode_df = self.read_dta(datapath("io", "data", "stata", "stata16_118.dta"))
        columns = ["utf8", "latin1", "ascii", "utf8_strl", "ascii_strl"]
        values = [
            ["ραηδας", "PÄNDÄS", "p", "ραηδας", "p"],
            ["ƤĀńĐąŜ", "Ö", "a", "ƤĀńĐąŜ", "a"],
            ["ᴘᴀᴎᴅᴀS", "Ü", "n", "ᴘᴀᴎᴅᴀS", "n"],
            [" ", " ", "d", " ", "d"],
            [" ", "", "a", " ", "a"],
            ["", "", "s", "", "s"],
            ["", "", " ", "", " "],
        ]
        expected = DataFrame(values, columns=columns)
        tm.assert_frame_equal(unicode_df, expected)

    def test_mixed_string_strl(self):
        # GH 23633
        output = [{"mixed": "string" * 500, "number": 0}, {"mixed": None, "number": 1}]
        output = DataFrame(output)
        output.number = output.number.astype("int32")
        with tm.ensure_clean() as path:
            output.to_stata(path, write_index=False, version=117)
            reread = read_stata(path)
            expected = output.fillna("")
            tm.assert_frame_equal(reread, expected)

            # Check strl supports all None (null)
            output["mixed"] = None
            output.to_stata(
                path, write_index=False, convert_strl=["mixed"], version=117
            )
            reread = read_stata(path)
            expected = output.fillna("")
            tm.assert_frame_equal(reread, expected)

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_all_none_exception(self, version):
        output = [{"none": "none", "number": 0}, {"none": None, "number": 1}]
        output = DataFrame(output)
        output["none"] = None
        with tm.ensure_clean() as path:
            with pytest.raises(ValueError, match="Column `none` cannot be exported"):
                output.to_stata(path, version=version)

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_invalid_file_not_written(self, version):
        content = "Here is one __�__ Another one __·__ Another one __½__"
        df = DataFrame([content], columns=["invalid"])
        with tm.ensure_clean() as path:
            msg1 = (
                r"'latin-1' codec can't encode character '\\ufffd' "
                r"in position 14: ordinal not in range\(256\)"
            )
            msg2 = (
                "'ascii' codec can't decode byte 0xef in position 14: "
                r"ordinal not in range\(128\)"
            )
            with pytest.raises(UnicodeEncodeError, match=f"{msg1}|{msg2}"):
                df.to_stata(path)

    def test_strl_latin1(self):
        # GH 23573, correct GSO data to reflect correct size
        output = DataFrame(
            [["pandas"] * 2, ["þâÑÐŧ"] * 2], columns=["var_str", "var_strl"]
        )
        with tm.ensure_clean() as path:
            output.to_stata(path, version=117, convert_strl=["var_strl"])
            with open(path, "rb") as reread:
                content = reread.read()
            expected = "þâÑÐŧ"
            assert expected.encode("latin-1") in content
            assert expected.encode("utf-8") in content
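            # The byte after the \x82 type marker in each GSO record holds the
            # low byte of the payload length, which counts the null terminator.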
            gsos = content.split(b"strls")[1][1:-2]
            for gso in gsos.split(b"GSO")[1:]:
                val = gso.split(b"\x00")[-2]
                size = gso[gso.find(b"\x82") + 1]
                assert len(val) == size - 1

    def test_encoding_latin1_118(self, datapath):
        # GH 25960
        msg = """
One or more strings in the dta file could not be decoded using utf-8, and
so the fallback encoding of latin-1 is being used. This can happen when a file
has been incorrectly encoded by Stata or some other software. You should verify
the string values returned are correct."""
        # Move path outside of read_stata, or else assert_produces_warning
        # will block pytest's skip mechanism from triggering (failing the test)
        # if the path is not present
        path = datapath("io", "data", "stata", "stata1_encoding_118.dta")
        with tm.assert_produces_warning(UnicodeWarning) as w:
            encoded = read_stata(path)
            assert len(w) == 151
            assert w[0].message.args[0] == msg

        expected = DataFrame([["Düsseldorf"]] * 151, columns=["kreis1849"])
        tm.assert_frame_equal(encoded, expected)
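
    # Format 119 is required for datasets with more than 32,767 variables.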
    @pytest.mark.slow
    def test_stata_119(self, datapath):
        # Gzipped since it contains 32,999 variables and is 20MiB uncompressed
        with gzip.open(
            datapath("io", "data", "stata", "stata1_119.dta.gz"), "rb"
        ) as gz:
            df = read_stata(gz)
        assert df.shape == (1, 32999)
        assert df.iloc[0, 6] == "A" * 3000
        assert df.iloc[0, 7] == 3.14
        assert df.iloc[0, -1] == 1
        assert df.iloc[0, 0] == pd.Timestamp(datetime(2012, 12, 21, 21, 12, 21))
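
    # StataWriterUTF8 (formats 118/119) accepts non-Latin-1 column names,
    # labels and data.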
    @pytest.mark.parametrize("version", [118, 119, None])
    def test_utf8_writer(self, version):
        cat = pd.Categorical(["a", "β", "ĉ"], ordered=True)
        data = DataFrame(
            [
                [1.0, 1, "ᴬ", "ᴀ relatively long ŝtring"],
                [2.0, 2, "ᴮ", ""],
                [3.0, 3, "ᴰ", None],
            ],
            columns=["Å", "β", "ĉ", "strls"],
        )
        data["ᴐᴬᵀ"] = cat
        variable_labels = {
            "Å": "apple",
            "β": "ᵈᵉᵊ",
            "ĉ": "ᴎტჄႲႳႴႶႺ",
            "strls": "Long Strings",
            "ᴐᴬᵀ": "",
        }
        data_label = "ᴅaᵀa-label"
        value_labels = {"β": {1: "label", 2: "æøå", 3: "ŋot valid latin-1"}}
        data["β"] = data["β"].astype(np.int32)
        with tm.ensure_clean() as path:
            writer = StataWriterUTF8(
                path,
                data,
                data_label=data_label,
                convert_strl=["strls"],
                variable_labels=variable_labels,
                write_index=False,
                version=version,
                value_labels=value_labels,
            )
            writer.write_file()
            reread_encoded = read_stata(path)
            # Missing is intentionally converted to empty strl
            data["strls"] = data["strls"].fillna("")
            # Variable with value labels is reread as categorical
            data["β"] = (
                data["β"].replace(value_labels["β"]).astype("category").cat.as_ordered()
            )
            tm.assert_frame_equal(data, reread_encoded)
            with StataReader(path) as reader:
                assert reader.data_label == data_label
                assert reader.variable_labels() == variable_labels

            data.to_stata(path, version=version, write_index=False)
            reread_to_stata = read_stata(path)
            tm.assert_frame_equal(data, reread_to_stata)
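
    # 33,000 variables exceed the format-118 limit, so version 119 is required.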
    def test_writer_118_exceptions(self):
        df = DataFrame(np.zeros((1, 33000), dtype=np.int8))
        with tm.ensure_clean() as path:
            with pytest.raises(ValueError, match="version must be either 118 or 119."):
                StataWriterUTF8(path, df, version=117)
        with tm.ensure_clean() as path:
            with pytest.raises(ValueError, match="You must use version 119"):
                StataWriterUTF8(path, df, version=118)
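

# Reading older dta formats should produce the same data as the format-118
# reference file.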
@pytest.mark.parametrize("version", [105, 108, 111, 113, 114])
def test_backward_compat(version, datapath):
    data_base = datapath("io", "data", "stata")
    ref = os.path.join(data_base, "stata-compat-118.dta")
    old = os.path.join(data_base, f"stata-compat-{version}.dta")
    expected = read_stata(ref)
    old_dta = read_stata(old)
    tm.assert_frame_equal(old_dta, expected, check_dtype=False)


def test_direct_read(datapath, monkeypatch):
    file_path = datapath("io", "data", "stata", "stata-compat-118.dta")

    # Test that opening a file path doesn't buffer the file.
    with StataReader(file_path) as reader:
        # Must not have been buffered to memory
        assert not reader.read().empty
        assert not isinstance(reader._path_or_buf, io.BytesIO)

    # Test that we use a given fp exactly, if possible.
    with open(file_path, "rb") as fp:
        with StataReader(fp) as reader:
            assert not reader.read().empty
            assert reader._path_or_buf is fp

    # Test that we use a given BytesIO exactly, if possible.
    with open(file_path, "rb") as fp:
        with io.BytesIO(fp.read()) as bio:
            with StataReader(bio) as reader:
                assert not reader.read().empty
                assert reader._path_or_buf is bio


def test_statareader_warns_when_used_without_context(datapath):
    file_path = datapath("io", "data", "stata", "stata-compat-118.dta")
    with tm.assert_produces_warning(
        ResourceWarning,
        match="without using a context manager",
    ):
        sr = StataReader(file_path)
        sr.read()
    with tm.assert_produces_warning(
        FutureWarning,
        match="is not part of the public API",
    ):
        sr.close()
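

# Write with each supported compression method, then decompress manually to
# verify that the file on disk really was compressed.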
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
@pytest.mark.parametrize("use_dict", [True, False])
@pytest.mark.parametrize("infer", [True, False])
def test_compression(compression, version, use_dict, infer):
    file_name = "dta_inferred_compression.dta"
    if compression:
        if use_dict:
            file_ext = compression
        else:
            file_ext = _compression_to_extension[compression]
        file_name += f".{file_ext}"
    compression_arg = compression
    if infer:
        compression_arg = "infer"
    if use_dict:
        compression_arg = {"method": compression}

    df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
    df.index.name = "index"
    with tm.ensure_clean(file_name) as path:
        df.to_stata(path, version=version, compression=compression_arg)
        if compression == "gzip":
            with gzip.open(path, "rb") as comp:
                fp = io.BytesIO(comp.read())
        elif compression == "zip":
            with zipfile.ZipFile(path, "r") as comp:
                fp = io.BytesIO(comp.read(comp.filelist[0]))
        elif compression == "tar":
            with tarfile.open(path) as tar:
                fp = io.BytesIO(tar.extractfile(tar.getnames()[0]).read())
        elif compression == "bz2":
            with bz2.open(path, "rb") as comp:
                fp = io.BytesIO(comp.read())
        elif compression == "zstd":
            zstd = pytest.importorskip("zstandard")
            with zstd.open(path, "rb") as comp:
                fp = io.BytesIO(comp.read())
        elif compression == "xz":
            lzma = pytest.importorskip("lzma")
            with lzma.open(path, "rb") as comp:
                fp = io.BytesIO(comp.read())
        elif compression is None:
            fp = path
        reread = read_stata(fp, index_col="index")
        expected = df.copy()
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(reread, expected)


@pytest.mark.parametrize("method", ["zip", "infer"])
@pytest.mark.parametrize("file_ext", [None, "dta", "zip"])
def test_compression_dict(method, file_ext):
    file_name = f"test.{file_ext}"
    archive_name = "test.dta"
    df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
    df.index.name = "index"
    with tm.ensure_clean(file_name) as path:
        compression = {"method": method, "archive_name": archive_name}
        df.to_stata(path, compression=compression)
        if method == "zip" or file_ext == "zip":
            with zipfile.ZipFile(path, "r") as zp:
                assert len(zp.filelist) == 1
                assert zp.filelist[0].filename == archive_name
                fp = io.BytesIO(zp.read(zp.filelist[0]))
        else:
            fp = path
        reread = read_stata(fp, index_col="index")
        expected = df.copy()
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(reread, expected)


@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
def test_chunked_categorical(version):
    df = DataFrame({"cats": Series(["a", "b", "a", "b", "c"], dtype="category")})
    df.index.name = "index"
    expected = df.copy()
    expected.index = expected.index.astype(np.int32)
    with tm.ensure_clean() as path:
        df.to_stata(path, version=version)
        with StataReader(path, chunksize=2, order_categoricals=False) as reader:
            for i, block in enumerate(reader):
                block = block.set_index("index")
                assert "cats" in block
                tm.assert_series_equal(
                    block.cats, expected.cats.iloc[2 * i : 2 * (i + 1)]
                )
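

# A partially labeled column yields chunk-dependent categories, so chunked
# reads emit CategoricalConversionWarning.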
def test_chunked_categorical_partial(datapath):
    dta_file = datapath("io", "data", "stata", "stata-dta-partially-labeled.dta")
    values = ["a", "b", "a", "b", 3.0]
    with StataReader(dta_file, chunksize=2) as reader:
        with tm.assert_produces_warning(CategoricalConversionWarning):
            for i, block in enumerate(reader):
                assert list(block.cats) == values[2 * i : 2 * (i + 1)]
                if i < 2:
                    idx = pd.Index(["a", "b"])
                else:
                    idx = pd.Index([3.0], dtype="float64")
                tm.assert_index_equal(block.cats.cat.categories, idx)
    with tm.assert_produces_warning(CategoricalConversionWarning):
        with StataReader(dta_file, chunksize=5) as reader:
            large_chunk = reader.__next__()
    direct = read_stata(dta_file)
    tm.assert_frame_equal(direct, large_chunk)


@pytest.mark.parametrize("chunksize", (-1, 0, "apple"))
def test_iterator_errors(datapath, chunksize):
    dta_file = datapath("io", "data", "stata", "stata-dta-partially-labeled.dta")
    with pytest.raises(ValueError, match="chunksize must be a positive"):
        with StataReader(dta_file, chunksize=chunksize):
            pass


def test_iterator_value_labels():
    # GH 31544
    values = ["c_label", "b_label"] + ["a_label"] * 500
    df = DataFrame({f"col{k}": pd.Categorical(values, ordered=True) for k in range(2)})
    with tm.ensure_clean() as path:
        df.to_stata(path, write_index=False)
        expected = pd.Index(["a_label", "b_label", "c_label"], dtype="object")
        with read_stata(path, chunksize=100) as reader:
            for j, chunk in enumerate(reader):
                for i in range(2):
                    tm.assert_index_equal(chunk.dtypes[i].categories, expected)
                tm.assert_frame_equal(chunk, df.iloc[j * 100 : (j + 1) * 100])
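

# int64 has no Stata equivalent and is converted to float64, losing precision
# for integers larger than 2**53.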
def test_precision_loss():
    df = DataFrame(
        [[sum(2**i for i in range(60)), sum(2**i for i in range(52))]],
        columns=["big", "little"],
    )
    with tm.ensure_clean() as path:
        with tm.assert_produces_warning(
            PossiblePrecisionLoss, match="Column converted from int64 to float64"
        ):
            df.to_stata(path, write_index=False)
        reread = read_stata(path)
        expected_dt = Series([np.float64, np.float64], index=["big", "little"])
        tm.assert_series_equal(reread.dtypes, expected_dt)
        assert reread.loc[0, "little"] == df.loc[0, "little"]
        assert reread.loc[0, "big"] == float(df.loc[0, "big"])


def test_compression_roundtrip(compression):
    df = DataFrame(
        [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
        index=["A", "B"],
        columns=["X", "Y", "Z"],
    )
    df.index.name = "index"

    with tm.ensure_clean() as path:
        df.to_stata(path, compression=compression)
        reread = read_stata(path, compression=compression, index_col="index")
        tm.assert_frame_equal(df, reread)

        # explicitly ensure file was compressed.
        with tm.decompress_file(path, compression) as fh:
            contents = io.BytesIO(fh.read())
        reread = read_stata(contents, index_col="index")
        tm.assert_frame_equal(df, reread)


@pytest.mark.parametrize("to_infer", [True, False])
@pytest.mark.parametrize("read_infer", [True, False])
def test_stata_compression(compression_only, read_infer, to_infer):
    compression = compression_only
    ext = _compression_to_extension[compression]
    filename = f"test.{ext}"
    df = DataFrame(
        [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
        index=["A", "B"],
        columns=["X", "Y", "Z"],
    )
    df.index.name = "index"

    to_compression = "infer" if to_infer else compression
    read_compression = "infer" if read_infer else compression
    with tm.ensure_clean(filename) as path:
        df.to_stata(path, compression=to_compression)
        result = read_stata(path, compression=read_compression, index_col="index")
        tm.assert_frame_equal(result, df)
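

# value_labels can be attached to plain numeric columns, not only categoricals.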
def test_non_categorical_value_labels():
    data = DataFrame(
        {
            "fully_labelled": [1, 2, 3, 3, 1],
            "partially_labelled": [1.0, 2.0, np.nan, 9.0, np.nan],
            "Y": [7, 7, 9, 8, 10],
            "Z": pd.Categorical(["j", "k", "l", "k", "j"]),
        }
    )
    with tm.ensure_clean() as path:
        value_labels = {
            "fully_labelled": {1: "one", 2: "two", 3: "three"},
            "partially_labelled": {1.0: "one", 2.0: "two"},
        }
        expected = {**value_labels, "Z": {0: "j", 1: "k", 2: "l"}}

        writer = StataWriter(path, data, value_labels=value_labels)
        writer.write_file()
        with StataReader(path) as reader:
            reader_value_labels = reader.value_labels()
        assert reader_value_labels == expected

        msg = "Can't create value labels for notY, it wasn't found in the dataset."
        with pytest.raises(KeyError, match=msg):
            value_labels = {"notY": {7: "label1", 8: "label2"}}
            StataWriter(path, data, value_labels=value_labels)

        msg = (
            "Can't create value labels for Z, value labels "
            "can only be applied to numeric columns."
        )
        with pytest.raises(ValueError, match=msg):
            value_labels = {"Z": {1: "a", 2: "k", 3: "j", 4: "i"}}
            StataWriter(path, data, value_labels=value_labels)


def test_non_categorical_value_label_name_conversion():
    # Check conversion of invalid variable names
    data = DataFrame(
        {
            "invalid~!": [1, 1, 2, 3, 5, 8],  # Only alphanumeric and _
            "6_invalid": [1, 1, 2, 3, 5, 8],  # Must start with letter or _
            "invalid_name_longer_than_32_characters": [8, 8, 9, 9, 8, 8],  # Too long
            "aggregate": [2, 5, 5, 6, 6, 9],  # Reserved words
            (1, 2): [1, 2, 3, 4, 5, 6],  # Hashable non-string
        }
    )

    value_labels = {
        "invalid~!": {1: "label1", 2: "label2"},
        "6_invalid": {1: "label1", 2: "label2"},
        "invalid_name_longer_than_32_characters": {8: "eight", 9: "nine"},
        "aggregate": {5: "five"},
        (1, 2): {3: "three"},
    }

    expected = {
        "invalid__": {1: "label1", 2: "label2"},
        "_6_invalid": {1: "label1", 2: "label2"},
        "invalid_name_longer_than_32_char": {8: "eight", 9: "nine"},
        "_aggregate": {5: "five"},
        "_1__2_": {3: "three"},
    }

    with tm.ensure_clean() as path:
        with tm.assert_produces_warning(InvalidColumnName):
            data.to_stata(path, value_labels=value_labels)

        with StataReader(path) as reader:
            reader_value_labels = reader.value_labels()
        assert reader_value_labels == expected


def test_non_categorical_value_label_convert_categoricals_error():
    # Mapping more than one value to the same label is valid for Stata
    # labels, but can't be read with convert_categoricals=True
    value_labels = {
        "repeated_labels": {10: "Ten", 20: "More than ten", 40: "More than ten"}
    }

    data = DataFrame(
        {
            "repeated_labels": [10, 10, 20, 20, 40, 40],
        }
    )

    with tm.ensure_clean() as path:
        data.to_stata(path, value_labels=value_labels)

        with StataReader(path, convert_categoricals=False) as reader:
            reader_value_labels = reader.value_labels()
        assert reader_value_labels == value_labels

        col = "repeated_labels"
        repeats = "-" * 80 + "\n" + "\n".join(["More than ten"])

        msg = f"""
Value labels for column {col} are not unique. These cannot be converted to
pandas categoricals.

Either read the file with `convert_categoricals` set to False or use the
low level interface in `StataReader` to separately read the values and the
value_labels.

The repeated labels are:
{repeats}
"""
        with pytest.raises(ValueError, match=msg):
            read_stata(path, convert_categoricals=True)
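

# pd.NA in nullable extension dtypes round-trips to the corresponding Stata
# missing value.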
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
@pytest.mark.parametrize(
    "dtype",
    [
        pd.BooleanDtype,
        pd.Int8Dtype,
        pd.Int16Dtype,
        pd.Int32Dtype,
        pd.Int64Dtype,
        pd.UInt8Dtype,
        pd.UInt16Dtype,
        pd.UInt32Dtype,
        pd.UInt64Dtype,
    ],
)
def test_nullable_support(dtype, version):
    df = DataFrame(
        {
            "a": Series([1.0, 2.0, 3.0]),
            "b": Series([1, pd.NA, pd.NA], dtype=dtype.name),
            "c": Series(["a", "b", None]),
        }
    )
    dtype_name = df.b.dtype.numpy_dtype.name
    # Only use supported names: no uint, bool or int64
    dtype_name = dtype_name.replace("u", "")
    if dtype_name == "int64":
        dtype_name = "int32"
    elif dtype_name == "bool":
        dtype_name = "int8"
    value = StataMissingValue.BASE_MISSING_VALUES[dtype_name]
    smv = StataMissingValue(value)
    expected_b = Series([1, smv, smv], dtype=object, name="b")
    expected_c = Series(["a", "b", ""], name="c")
    with tm.ensure_clean() as path:
        df.to_stata(path, write_index=False, version=version)
        reread = read_stata(path, convert_missing=True)
        tm.assert_series_equal(df.a, reread.a)
        tm.assert_series_equal(reread.b, expected_b)
        tm.assert_series_equal(reread.c, expected_c)