# test_encoding.py
  1. """
  2. Tests encoding functionality during parsing
  3. for all of the parsers defined in parsers.py
  4. """
  5. from io import (
  6. BytesIO,
  7. TextIOWrapper,
  8. )
  9. import os
  10. import tempfile
  11. import uuid
  12. import numpy as np
  13. import pytest
  14. from pandas import (
  15. DataFrame,
  16. read_csv,
  17. )
  18. import pandas._testing as tm
# Module-level pytest marks built from the "pyarrow_skip" / "pyarrow_xfail"
# fixtures (defined elsewhere in the test suite's conftest — not visible here);
# presumably they skip / xfail a test under the pyarrow parser engine.
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
  21. def test_bytes_io_input(all_parsers):
  22. encoding = "cp1255"
  23. parser = all_parsers
  24. data = BytesIO("שלום:1234\n562:123".encode(encoding))
  25. result = parser.read_csv(data, sep=":", encoding=encoding)
  26. expected = DataFrame([[562, 123]], columns=["שלום", "1234"])
  27. tm.assert_frame_equal(result, expected)
  28. @skip_pyarrow
  29. def test_read_csv_unicode(all_parsers):
  30. parser = all_parsers
  31. data = BytesIO("\u0141aski, Jan;1".encode())
  32. result = parser.read_csv(data, sep=";", encoding="utf-8", header=None)
  33. expected = DataFrame([["\u0141aski, Jan", 1]])
  34. tm.assert_frame_equal(result, expected)
  35. @xfail_pyarrow
  36. @pytest.mark.parametrize("sep", [",", "\t"])
  37. @pytest.mark.parametrize("encoding", ["utf-16", "utf-16le", "utf-16be"])
  38. def test_utf16_bom_skiprows(all_parsers, sep, encoding):
  39. # see gh-2298
  40. parser = all_parsers
  41. data = """skip this
  42. skip this too
  43. A,B,C
  44. 1,2,3
  45. 4,5,6""".replace(
  46. ",", sep
  47. )
  48. path = f"__{uuid.uuid4()}__.csv"
  49. kwargs = {"sep": sep, "skiprows": 2}
  50. utf8 = "utf-8"
  51. with tm.ensure_clean(path) as path:
  52. bytes_data = data.encode(encoding)
  53. with open(path, "wb") as f:
  54. f.write(bytes_data)
  55. with TextIOWrapper(BytesIO(data.encode(utf8)), encoding=utf8) as bytes_buffer:
  56. result = parser.read_csv(path, encoding=encoding, **kwargs)
  57. expected = parser.read_csv(bytes_buffer, encoding=utf8, **kwargs)
  58. tm.assert_frame_equal(result, expected)
  59. def test_utf16_example(all_parsers, csv_dir_path):
  60. path = os.path.join(csv_dir_path, "utf16_ex.txt")
  61. parser = all_parsers
  62. result = parser.read_csv(path, encoding="utf-16", sep="\t")
  63. assert len(result) == 50
  64. def test_unicode_encoding(all_parsers, csv_dir_path):
  65. path = os.path.join(csv_dir_path, "unicode_series.csv")
  66. parser = all_parsers
  67. result = parser.read_csv(path, header=None, encoding="latin-1")
  68. result = result.set_index(0)
  69. got = result[1][1632]
  70. expected = "\xc1 k\xf6ldum klaka (Cold Fever) (1994)"
  71. assert got == expected
  72. @pytest.mark.parametrize(
  73. "data,kwargs,expected",
  74. [
  75. # Basic test
  76. ("a\n1", {}, DataFrame({"a": [1]})),
  77. # "Regular" quoting
  78. ('"a"\n1', {"quotechar": '"'}, DataFrame({"a": [1]})),
  79. # Test in a data row instead of header
  80. ("b\n1", {"names": ["a"]}, DataFrame({"a": ["b", "1"]})),
  81. # Test in empty data row with skipping
  82. ("\n1", {"names": ["a"], "skip_blank_lines": True}, DataFrame({"a": [1]})),
  83. # Test in empty data row without skipping
  84. (
  85. "\n1",
  86. {"names": ["a"], "skip_blank_lines": False},
  87. DataFrame({"a": [np.nan, 1]}),
  88. ),
  89. ],
  90. )
  91. def test_utf8_bom(all_parsers, data, kwargs, expected, request):
  92. # see gh-4793
  93. parser = all_parsers
  94. bom = "\ufeff"
  95. utf8 = "utf-8"
  96. def _encode_data_with_bom(_data):
  97. bom_data = (bom + _data).encode(utf8)
  98. return BytesIO(bom_data)
  99. if (
  100. parser.engine == "pyarrow"
  101. and data == "\n1"
  102. and kwargs.get("skip_blank_lines", True)
  103. ):
  104. # Manually xfail, since we don't have mechanism to xfail specific version
  105. request.node.add_marker(
  106. pytest.mark.xfail(reason="Pyarrow can't read blank lines")
  107. )
  108. result = parser.read_csv(_encode_data_with_bom(data), encoding=utf8, **kwargs)
  109. tm.assert_frame_equal(result, expected)
  110. def test_read_csv_utf_aliases(all_parsers, utf_value, encoding_fmt):
  111. # see gh-13549
  112. expected = DataFrame({"mb_num": [4.8], "multibyte": ["test"]})
  113. parser = all_parsers
  114. encoding = encoding_fmt.format(utf_value)
  115. data = "mb_num,multibyte\n4.8,test".encode(encoding)
  116. result = parser.read_csv(BytesIO(data), encoding=encoding)
  117. tm.assert_frame_equal(result, expected)
  118. @pytest.mark.parametrize(
  119. "file_path,encoding",
  120. [
  121. (("io", "data", "csv", "test1.csv"), "utf-8"),
  122. (("io", "parser", "data", "unicode_series.csv"), "latin-1"),
  123. (("io", "parser", "data", "sauron.SHIFT_JIS.csv"), "shiftjis"),
  124. ],
  125. )
  126. def test_binary_mode_file_buffers(all_parsers, file_path, encoding, datapath):
  127. # gh-23779: Python csv engine shouldn't error on files opened in binary.
  128. # gh-31575: Python csv engine shouldn't error on files opened in raw binary.
  129. parser = all_parsers
  130. fpath = datapath(*file_path)
  131. expected = parser.read_csv(fpath, encoding=encoding)
  132. with open(fpath, encoding=encoding) as fa:
  133. result = parser.read_csv(fa)
  134. assert not fa.closed
  135. tm.assert_frame_equal(expected, result)
  136. with open(fpath, mode="rb") as fb:
  137. result = parser.read_csv(fb, encoding=encoding)
  138. assert not fb.closed
  139. tm.assert_frame_equal(expected, result)
  140. with open(fpath, mode="rb", buffering=0) as fb:
  141. result = parser.read_csv(fb, encoding=encoding)
  142. assert not fb.closed
  143. tm.assert_frame_equal(expected, result)
  144. @skip_pyarrow
  145. @pytest.mark.parametrize("pass_encoding", [True, False])
  146. def test_encoding_temp_file(all_parsers, utf_value, encoding_fmt, pass_encoding):
  147. # see gh-24130
  148. parser = all_parsers
  149. encoding = encoding_fmt.format(utf_value)
  150. expected = DataFrame({"foo": ["bar"]})
  151. with tm.ensure_clean(mode="w+", encoding=encoding, return_filelike=True) as f:
  152. f.write("foo\nbar")
  153. f.seek(0)
  154. result = parser.read_csv(f, encoding=encoding if pass_encoding else None)
  155. tm.assert_frame_equal(result, expected)
  156. @skip_pyarrow
  157. def test_encoding_named_temp_file(all_parsers):
  158. # see gh-31819
  159. parser = all_parsers
  160. encoding = "shift-jis"
  161. title = "てすと"
  162. data = "こむ"
  163. expected = DataFrame({title: [data]})
  164. with tempfile.NamedTemporaryFile() as f:
  165. f.write(f"{title}\n{data}".encode(encoding))
  166. f.seek(0)
  167. result = parser.read_csv(f, encoding=encoding)
  168. tm.assert_frame_equal(result, expected)
  169. assert not f.closed
  170. @pytest.mark.parametrize(
  171. "encoding", ["utf-8", "utf-16", "utf-16-be", "utf-16-le", "utf-32"]
  172. )
  173. def test_parse_encoded_special_characters(encoding):
  174. # GH16218 Verify parsing of data with encoded special characters
  175. # Data contains a Unicode 'FULLWIDTH COLON' (U+FF1A) at position (0,"a")
  176. data = "a\tb\n:foo\t0\nbar\t1\nbaz\t2"
  177. encoded_data = BytesIO(data.encode(encoding))
  178. result = read_csv(encoded_data, delimiter="\t", encoding=encoding)
  179. expected = DataFrame(data=[[":foo", 0], ["bar", 1], ["baz", 2]], columns=["a", "b"])
  180. tm.assert_frame_equal(result, expected)
  181. @xfail_pyarrow
  182. @pytest.mark.parametrize("encoding", ["utf-8", None, "utf-16", "cp1255", "latin-1"])
  183. def test_encoding_memory_map(all_parsers, encoding):
  184. # GH40986
  185. parser = all_parsers
  186. expected = DataFrame(
  187. {
  188. "name": ["Raphael", "Donatello", "Miguel Angel", "Leonardo"],
  189. "mask": ["red", "purple", "orange", "blue"],
  190. "weapon": ["sai", "bo staff", "nunchunk", "katana"],
  191. }
  192. )
  193. with tm.ensure_clean() as file:
  194. expected.to_csv(file, index=False, encoding=encoding)
  195. df = parser.read_csv(file, encoding=encoding, memory_map=True)
  196. tm.assert_frame_equal(df, expected)
  197. @xfail_pyarrow
  198. def test_chunk_splits_multibyte_char(all_parsers):
  199. """
  200. Chunk splits a multibyte character with memory_map=True
  201. GH 43540
  202. """
  203. parser = all_parsers
  204. # DEFAULT_CHUNKSIZE = 262144, defined in parsers.pyx
  205. df = DataFrame(data=["a" * 127] * 2048)
  206. # Put two-bytes utf-8 encoded character "ą" at the end of chunk
  207. # utf-8 encoding of "ą" is b'\xc4\x85'
  208. df.iloc[2047] = "a" * 127 + "ą"
  209. with tm.ensure_clean("bug-gh43540.csv") as fname:
  210. df.to_csv(fname, index=False, header=False, encoding="utf-8")
  211. dfr = parser.read_csv(fname, header=None, memory_map=True, engine="c")
  212. tm.assert_frame_equal(dfr, df)
  213. @xfail_pyarrow
  214. def test_readcsv_memmap_utf8(all_parsers):
  215. """
  216. GH 43787
  217. Test correct handling of UTF-8 chars when memory_map=True and encoding is UTF-8
  218. """
  219. lines = []
  220. line_length = 128
  221. start_char = " "
  222. end_char = "\U00010080"
  223. # This for loop creates a list of 128-char strings
  224. # consisting of consecutive Unicode chars
  225. for lnum in range(ord(start_char), ord(end_char), line_length):
  226. line = "".join([chr(c) for c in range(lnum, lnum + 0x80)]) + "\n"
  227. try:
  228. line.encode("utf-8")
  229. except UnicodeEncodeError:
  230. continue
  231. lines.append(line)
  232. parser = all_parsers
  233. df = DataFrame(lines)
  234. with tm.ensure_clean("utf8test.csv") as fname:
  235. df.to_csv(fname, index=False, header=False, encoding="utf-8")
  236. dfr = parser.read_csv(
  237. fname, header=None, memory_map=True, engine="c", encoding="utf-8"
  238. )
  239. tm.assert_frame_equal(df, dfr)
  240. @pytest.mark.usefixtures("pyarrow_xfail")
  241. @pytest.mark.parametrize("mode", ["w+b", "w+t"])
  242. def test_not_readable(all_parsers, mode):
  243. # GH43439
  244. parser = all_parsers
  245. content = b"abcd"
  246. if "t" in mode:
  247. content = "abcd"
  248. with tempfile.SpooledTemporaryFile(mode=mode) as handle:
  249. handle.write(content)
  250. handle.seek(0)
  251. df = parser.read_csv(handle)
  252. expected = DataFrame([], columns=["abcd"])
  253. tm.assert_frame_equal(df, expected)