# npyio.py

import os
import re
import functools
import itertools
import warnings
import weakref
import contextlib
import operator
from operator import itemgetter, index as opindex, methodcaller
from collections.abc import Mapping

import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core import overrides
from numpy.core.multiarray import packbits, unpackbits
from numpy.core._multiarray_umath import _load_from_filelike
from numpy.core.overrides import set_array_function_like_doc, set_module
from ._iotools import (
    LineSplitter, NameValidator, StringConverter, ConverterError,
    ConverterLockError, ConversionWarning, _is_string_like,
    has_nested_fields, flatten_dtype, easy_dtype, _decode_line
    )
from numpy.compat import (
    asbytes, asstr, asunicode, os_fspath, os_PathLike, pickle
    )


__all__ = [
    'savetxt', 'loadtxt', 'genfromtxt',
    'recfromtxt', 'recfromcsv', 'load', 'save', 'savez',
    'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
    ]


array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')


class BagObj:
    """
    BagObj(obj)

    Convert attribute look-ups to getitems on the object passed in.

    Parameters
    ----------
    obj : class instance
        Object on which attribute look-up is performed.

    Examples
    --------
    >>> from numpy.lib.npyio import BagObj as BO
    >>> class BagDemo:
    ...     def __getitem__(self, key): # An instance of BagObj(BagDemo)
    ...                                 # will call this method when any
    ...                                 # attribute look-up is required
    ...         result = "Doesn't matter what you want, "
    ...         return result + "you're gonna get this"
    ...
    >>> demo_obj = BagDemo()
    >>> bagobj = BO(demo_obj)
    >>> bagobj.hello_there
    "Doesn't matter what you want, you're gonna get this"
    >>> bagobj.I_can_be_anything
    "Doesn't matter what you want, you're gonna get this"

    """

    def __init__(self, obj):
        # Use weakref to make NpzFile objects collectable by refcount
        self._obj = weakref.proxy(obj)

    def __getattribute__(self, key):
        try:
            return object.__getattribute__(self, '_obj')[key]
        except KeyError:
            raise AttributeError(key) from None

    def __dir__(self):
        """
        Enables dir(bagobj) to list the files in an NpzFile.

        This also enables tab-completion in an interpreter or IPython.
        """
        return list(object.__getattribute__(self, '_obj').keys())


def zipfile_factory(file, *args, **kwargs):
    """
    Create a ZipFile.

    Allows for Zip64, and the `file` argument can accept file, str, or
    pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
    constructor.
    """
    if not hasattr(file, 'read'):
        file = os_fspath(file)
    import zipfile
    kwargs['allowZip64'] = True
    return zipfile.ZipFile(file, *args, **kwargs)
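

# A minimal usage sketch (editor's illustration; the archive name is
# hypothetical): `zipfile_factory` behaves like `zipfile.ZipFile` with
# Zip64 support forced on, so members larger than 2 GiB can be written.
#
#     import zipfile
#     zf = zipfile_factory("bundle.zip", mode="w",
#                          compression=zipfile.ZIP_DEFLATED)
#     zf.writestr("note.txt", b"payload")
#     zf.close()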


class NpzFile(Mapping):
    """
    NpzFile(fid)

    A dictionary-like object with lazy-loading of files in the zipped
    archive provided on construction.

    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension;
    other files are ignored.

    The arrays and file strings are lazily loaded on either
    getitem access using ``obj['key']`` or attribute lookup using
    ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
    be obtained with ``obj.files`` and the ZipFile object itself using
    ``obj.zip``.

    Attributes
    ----------
    files : list of str
        List of all files in the archive with a ``.npy`` extension.
    zip : ZipFile instance
        The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        An object on which attribute look-up can be performed as an
        alternative to getitem access on the `NpzFile` instance itself.
    allow_pickle : bool, optional
        Allow loading pickled data. Default: False

        .. versionchanged:: 1.16.3
            Made default False in response to CVE-2019-6446.

    pickle_kwargs : dict, optional
        Additional keyword arguments to pass on to pickle.load.
        These are only useful when loading object arrays saved on
        Python 2 when using Python 3.
    max_header_size : int, optional
        Maximum allowed size of the header.  Large headers may not be safe
        to load securely and thus require explicitly passing a larger value.
        See :py:func:`ast.literal_eval` for details.
        This option is ignored when `allow_pickle` is passed.  In that case
        the file is by definition trusted and the limit is unnecessary.

    Parameters
    ----------
    fid : file or str
        The zipped archive to open. This is either a file-like object
        or a string containing the path to the archive.
    own_fid : bool, optional
        Whether NpzFile should close the file handle.
        Requires that `fid` is a file-like object.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez(outfile, x=x, y=y)
    >>> _ = outfile.seek(0)

    >>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.npyio.NpzFile)
    True
    >>> sorted(npz.files)
    ['x', 'y']
    >>> npz['x']  # getitem access
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> npz.f.x  # attribute lookup
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # Make __exit__ safe if zipfile_factory raises an exception
    zip = None
    fid = None

    def __init__(self, fid, own_fid=False, allow_pickle=False,
                 pickle_kwargs=None, *,
                 max_header_size=format._MAX_HEADER_SIZE):
        # Import is postponed to here since zipfile depends on gzip, an
        # optional component of the so-called standard library.
        _zip = zipfile_factory(fid)
        self._files = _zip.namelist()
        self.files = []
        self.allow_pickle = allow_pickle
        self.max_header_size = max_header_size
        self.pickle_kwargs = pickle_kwargs
        for x in self._files:
            if x.endswith('.npy'):
                self.files.append(x[:-4])
            else:
                self.files.append(x)
        self.zip = _zip
        self.f = BagObj(self)
        if own_fid:
            self.fid = fid

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """
        Close the file.
        """
        if self.zip is not None:
            self.zip.close()
            self.zip = None
        if self.fid is not None:
            self.fid.close()
            self.fid = None
        self.f = None  # break reference cycle

    def __del__(self):
        self.close()

    # Implement the Mapping ABC
    def __iter__(self):
        return iter(self.files)

    def __len__(self):
        return len(self.files)
    def __getitem__(self, key):
        # FIXME: This seems like it will copy strings around
        #   more than is strictly necessary.  The zipfile
        #   will read the string and then
        #   the format.read_array will copy the string
        #   to another place in memory.
        #   It would be better if the zipfile could read
        #   (or at least uncompress) the data
        #   directly into the array memory.
        member = False
        if key in self._files:
            member = True
        elif key in self.files:
            member = True
            key += '.npy'
        if member:
            bytes = self.zip.open(key)
            magic = bytes.read(len(format.MAGIC_PREFIX))
            bytes.close()
            if magic == format.MAGIC_PREFIX:
                bytes = self.zip.open(key)
                return format.read_array(bytes,
                                         allow_pickle=self.allow_pickle,
                                         pickle_kwargs=self.pickle_kwargs,
                                         max_header_size=self.max_header_size)
            else:
                return self.zip.read(key)
        else:
            raise KeyError("%s is not a file in the archive" % key)


@set_module('numpy')
def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
         encoding='ASCII', *, max_header_size=format._MAX_HEADER_SIZE):
  224. """
  225. Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
  226. .. warning:: Loading files that contain object arrays uses the ``pickle``
  227. module, which is not secure against erroneous or maliciously
  228. constructed data. Consider passing ``allow_pickle=False`` to
  229. load data that is known not to contain object arrays for the
  230. safer handling of untrusted sources.
  231. Parameters
  232. ----------
  233. file : file-like object, string, or pathlib.Path
  234. The file to read. File-like objects must support the
  235. ``seek()`` and ``read()`` methods and must always
  236. be opened in binary mode. Pickled files require that the
  237. file-like object support the ``readline()`` method as well.
  238. mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
  239. If not None, then memory-map the file, using the given mode (see
  240. `numpy.memmap` for a detailed description of the modes). A
  241. memory-mapped array is kept on disk. However, it can be accessed
  242. and sliced like any ndarray. Memory mapping is especially useful
  243. for accessing small fragments of large files without reading the
  244. entire file into memory.
  245. allow_pickle : bool, optional
  246. Allow loading pickled object arrays stored in npy files. Reasons for
  247. disallowing pickles include security, as loading pickled data can
  248. execute arbitrary code. If pickles are disallowed, loading object
  249. arrays will fail. Default: False
  250. .. versionchanged:: 1.16.3
  251. Made default False in response to CVE-2019-6446.
  252. fix_imports : bool, optional
  253. Only useful when loading Python 2 generated pickled files on Python 3,
  254. which includes npy/npz files containing object arrays. If `fix_imports`
  255. is True, pickle will try to map the old Python 2 names to the new names
  256. used in Python 3.
  257. encoding : str, optional
  258. What encoding to use when reading Python 2 strings. Only useful when
  259. loading Python 2 generated pickled files in Python 3, which includes
  260. npy/npz files containing object arrays. Values other than 'latin1',
  261. 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
  262. data. Default: 'ASCII'
  263. max_header_size : int, optional
  264. Maximum allowed size of the header. Large headers may not be safe
  265. to load securely and thus require explicitly passing a larger value.
  266. See :py:meth:`ast.literal_eval()` for details.
  267. This option is ignored when `allow_pickle` is passed. In that case
  268. the file is by definition trusted and the limit is unnecessary.
  269. Returns
  270. -------
  271. result : array, tuple, dict, etc.
  272. Data stored in the file. For ``.npz`` files, the returned instance
  273. of NpzFile class must be closed to avoid leaking file descriptors.
  274. Raises
  275. ------
  276. OSError
  277. If the input file does not exist or cannot be read.
  278. UnpicklingError
  279. If ``allow_pickle=True``, but the file cannot be loaded as a pickle.
  280. ValueError
  281. The file contains an object array, but ``allow_pickle=False`` given.
  282. See Also
  283. --------
  284. save, savez, savez_compressed, loadtxt
  285. memmap : Create a memory-map to an array stored in a file on disk.
  286. lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
  287. Notes
  288. -----
  289. - If the file contains pickle data, then whatever object is stored
  290. in the pickle is returned.
  291. - If the file is a ``.npy`` file, then a single array is returned.
  292. - If the file is a ``.npz`` file, then a dictionary-like object is
  293. returned, containing ``{filename: array}`` key-value pairs, one for
  294. each file in the archive.
  295. - If the file is a ``.npz`` file, the returned value supports the
  296. context manager protocol in a similar fashion to the open function::
  297. with load('foo.npz') as data:
  298. a = data['a']
  299. The underlying file descriptor is closed when exiting the 'with'
  300. block.
  301. Examples
  302. --------
  303. Store data to disk, and load it again:
  304. >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
  305. >>> np.load('/tmp/123.npy')
  306. array([[1, 2, 3],
  307. [4, 5, 6]])
  308. Store compressed data to disk, and load it again:
  309. >>> a=np.array([[1, 2, 3], [4, 5, 6]])
  310. >>> b=np.array([1, 2])
  311. >>> np.savez('/tmp/123.npz', a=a, b=b)
  312. >>> data = np.load('/tmp/123.npz')
  313. >>> data['a']
  314. array([[1, 2, 3],
  315. [4, 5, 6]])
  316. >>> data['b']
  317. array([1, 2])
  318. >>> data.close()
  319. Mem-map the stored array, and then access the second row
  320. directly from disk:
  321. >>> X = np.load('/tmp/123.npy', mmap_mode='r')
  322. >>> X[1, :]
  323. memmap([4, 5, 6])
  324. """
    if encoding not in ('ASCII', 'latin1', 'bytes'):
        # The 'encoding' value for pickle also affects what encoding
        # the serialized binary data of NumPy arrays is loaded
        # in. Pickle does not pass on the encoding information to
        # NumPy. The unpickling code in numpy.core.multiarray is
        # written to assume that unicode data appearing where binary
        # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
        #
        # Other encoding values can corrupt binary data, and we
        # purposefully disallow them. For the same reason, the errors=
        # argument is not exposed, as values other than 'strict'
        # can similarly silently corrupt numerical data.
        raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")

    pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)

    with contextlib.ExitStack() as stack:
        if hasattr(file, 'read'):
            fid = file
            own_fid = False
        else:
            fid = stack.enter_context(open(os_fspath(file), "rb"))
            own_fid = True

        # Code to distinguish NumPy binary files from pickles.
        _ZIP_PREFIX = b'PK\x03\x04'
        _ZIP_SUFFIX = b'PK\x05\x06'  # empty zip files start with this
        N = len(format.MAGIC_PREFIX)
        magic = fid.read(N)
        # If the file size is less than N, we need to make sure not
        # to seek past the beginning of the file
        fid.seek(-min(N, len(magic)), 1)  # back-up
        if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
            # zip-file (assume .npz)
            # Potentially transfer file ownership to NpzFile
            stack.pop_all()
            ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
                          pickle_kwargs=pickle_kwargs,
                          max_header_size=max_header_size)
            return ret
        elif magic == format.MAGIC_PREFIX:
            # .npy file
            if mmap_mode:
                if allow_pickle:
                    max_header_size = 2**64
                return format.open_memmap(file, mode=mmap_mode,
                                          max_header_size=max_header_size)
            else:
                return format.read_array(fid, allow_pickle=allow_pickle,
                                         pickle_kwargs=pickle_kwargs,
                                         max_header_size=max_header_size)
        else:
            # Try a pickle
            if not allow_pickle:
                raise ValueError("Cannot load file containing pickled data "
                                 "when allow_pickle=False")
            try:
                return pickle.load(fid, **pickle_kwargs)
            except Exception as e:
                raise pickle.UnpicklingError(
                    f"Failed to interpret file {file!r} as a pickle") from e


def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
    return (arr,)


@array_function_dispatch(_save_dispatcher)
def save(file, arr, allow_pickle=True, fix_imports=True):
  387. """
  388. Save an array to a binary file in NumPy ``.npy`` format.
  389. Parameters
  390. ----------
  391. file : file, str, or pathlib.Path
  392. File or filename to which the data is saved. If file is a file-object,
  393. then the filename is unchanged. If file is a string or Path, a ``.npy``
  394. extension will be appended to the filename if it does not already
  395. have one.
  396. arr : array_like
  397. Array data to be saved.
  398. allow_pickle : bool, optional
  399. Allow saving object arrays using Python pickles. Reasons for disallowing
  400. pickles include security (loading pickled data can execute arbitrary
  401. code) and portability (pickled objects may not be loadable on different
  402. Python installations, for example if the stored objects require libraries
  403. that are not available, and not all pickled data is compatible between
  404. Python 2 and Python 3).
  405. Default: True
  406. fix_imports : bool, optional
  407. Only useful in forcing objects in object arrays on Python 3 to be
  408. pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
  409. will try to map the new Python 3 names to the old module names used in
  410. Python 2, so that the pickle data stream is readable with Python 2.
  411. See Also
  412. --------
  413. savez : Save several arrays into a ``.npz`` archive
  414. savetxt, load
  415. Notes
  416. -----
  417. For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
  418. Any data saved to the file is appended to the end of the file.
  419. Examples
  420. --------
  421. >>> from tempfile import TemporaryFile
  422. >>> outfile = TemporaryFile()
  423. >>> x = np.arange(10)
  424. >>> np.save(outfile, x)
  425. >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
  426. >>> np.load(outfile)
  427. array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
  428. >>> with open('test.npy', 'wb') as f:
  429. ... np.save(f, np.array([1, 2]))
  430. ... np.save(f, np.array([1, 3]))
  431. >>> with open('test.npy', 'rb') as f:
  432. ... a = np.load(f)
  433. ... b = np.load(f)
  434. >>> print(a, b)
  435. # [1 2] [1 3]
  436. """
    if hasattr(file, 'write'):
        file_ctx = contextlib.nullcontext(file)
    else:
        file = os_fspath(file)
        if not file.endswith('.npy'):
            file = file + '.npy'
        file_ctx = open(file, "wb")

    with file_ctx as fid:
        arr = np.asanyarray(arr)
        format.write_array(fid, arr, allow_pickle=allow_pickle,
                           pickle_kwargs=dict(fix_imports=fix_imports))


def _savez_dispatcher(file, *args, **kwds):
    yield from args
    yield from kwds.values()


@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, **kwds):
  453. """Save several arrays into a single file in uncompressed ``.npz`` format.
  454. Provide arrays as keyword arguments to store them under the
  455. corresponding name in the output file: ``savez(fn, x=x, y=y)``.
  456. If arrays are specified as positional arguments, i.e., ``savez(fn,
  457. x, y)``, their names will be `arr_0`, `arr_1`, etc.
  458. Parameters
  459. ----------
  460. file : str or file
  461. Either the filename (string) or an open file (file-like object)
  462. where the data will be saved. If file is a string or a Path, the
  463. ``.npz`` extension will be appended to the filename if it is not
  464. already there.
  465. args : Arguments, optional
  466. Arrays to save to the file. Please use keyword arguments (see
  467. `kwds` below) to assign names to arrays. Arrays specified as
  468. args will be named "arr_0", "arr_1", and so on.
  469. kwds : Keyword arguments, optional
  470. Arrays to save to the file. Each array will be saved to the
  471. output file with its corresponding keyword name.
  472. Returns
  473. -------
  474. None
  475. See Also
  476. --------
  477. save : Save a single array to a binary file in NumPy format.
  478. savetxt : Save an array to a file as plain text.
  479. savez_compressed : Save several arrays into a compressed ``.npz`` archive
  480. Notes
  481. -----
  482. The ``.npz`` file format is a zipped archive of files named after the
  483. variables they contain. The archive is not compressed and each file
  484. in the archive contains one variable in ``.npy`` format. For a
  485. description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
  486. When opening the saved ``.npz`` file with `load` a `NpzFile` object is
  487. returned. This is a dictionary-like object which can be queried for
  488. its list of arrays (with the ``.files`` attribute), and for the arrays
  489. themselves.
  490. Keys passed in `kwds` are used as filenames inside the ZIP archive.
  491. Therefore, keys should be valid filenames; e.g., avoid keys that begin with
  492. ``/`` or contain ``.``.
  493. When naming variables with keyword arguments, it is not possible to name a
  494. variable ``file``, as this would cause the ``file`` argument to be defined
  495. twice in the call to ``savez``.
  496. Examples
  497. --------
  498. >>> from tempfile import TemporaryFile
  499. >>> outfile = TemporaryFile()
  500. >>> x = np.arange(10)
  501. >>> y = np.sin(x)
  502. Using `savez` with \\*args, the arrays are saved with default names.
  503. >>> np.savez(outfile, x, y)
  504. >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
  505. >>> npzfile = np.load(outfile)
  506. >>> npzfile.files
  507. ['arr_0', 'arr_1']
  508. >>> npzfile['arr_0']
  509. array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
  510. Using `savez` with \\**kwds, the arrays are saved with the keyword names.
  511. >>> outfile = TemporaryFile()
  512. >>> np.savez(outfile, x=x, y=y)
  513. >>> _ = outfile.seek(0)
  514. >>> npzfile = np.load(outfile)
  515. >>> sorted(npzfile.files)
  516. ['x', 'y']
  517. >>> npzfile['x']
  518. array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
  519. """
    _savez(file, args, kwds, False)


def _savez_compressed_dispatcher(file, *args, **kwds):
    yield from args
    yield from kwds.values()


@array_function_dispatch(_savez_compressed_dispatcher)
def savez_compressed(file, *args, **kwds):
  526. """
  527. Save several arrays into a single file in compressed ``.npz`` format.
  528. Provide arrays as keyword arguments to store them under the
  529. corresponding name in the output file: ``savez(fn, x=x, y=y)``.
  530. If arrays are specified as positional arguments, i.e., ``savez(fn,
  531. x, y)``, their names will be `arr_0`, `arr_1`, etc.
  532. Parameters
  533. ----------
  534. file : str or file
  535. Either the filename (string) or an open file (file-like object)
  536. where the data will be saved. If file is a string or a Path, the
  537. ``.npz`` extension will be appended to the filename if it is not
  538. already there.
  539. args : Arguments, optional
  540. Arrays to save to the file. Please use keyword arguments (see
  541. `kwds` below) to assign names to arrays. Arrays specified as
  542. args will be named "arr_0", "arr_1", and so on.
  543. kwds : Keyword arguments, optional
  544. Arrays to save to the file. Each array will be saved to the
  545. output file with its corresponding keyword name.
  546. Returns
  547. -------
  548. None
  549. See Also
  550. --------
  551. numpy.save : Save a single array to a binary file in NumPy format.
  552. numpy.savetxt : Save an array to a file as plain text.
  553. numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
  554. numpy.load : Load the files created by savez_compressed.
  555. Notes
  556. -----
  557. The ``.npz`` file format is a zipped archive of files named after the
  558. variables they contain. The archive is compressed with
  559. ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
  560. in ``.npy`` format. For a description of the ``.npy`` format, see
  561. :py:mod:`numpy.lib.format`.
  562. When opening the saved ``.npz`` file with `load` a `NpzFile` object is
  563. returned. This is a dictionary-like object which can be queried for
  564. its list of arrays (with the ``.files`` attribute), and for the arrays
  565. themselves.
  566. Examples
  567. --------
  568. >>> test_array = np.random.rand(3, 2)
  569. >>> test_vector = np.random.rand(4)
  570. >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
  571. >>> loaded = np.load('/tmp/123.npz')
  572. >>> print(np.array_equal(test_array, loaded['a']))
  573. True
  574. >>> print(np.array_equal(test_vector, loaded['b']))
  575. True
  576. """
  577. _savez(file, args, kwds, True)


def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile

    if not hasattr(file, 'write'):
        file = os_fspath(file)
        if not file.endswith('.npz'):
            file = file + '.npz'

    namedict = kwds
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        if key in namedict.keys():
            raise ValueError(
                "Cannot use un-named variables and keyword %s" % key)
        namedict[key] = val

    if compress:
        compression = zipfile.ZIP_DEFLATED
    else:
        compression = zipfile.ZIP_STORED

    zipf = zipfile_factory(file, mode="w", compression=compression)

    for key, val in namedict.items():
        fname = key + '.npy'
        val = np.asanyarray(val)
        # always force zip64, gh-10776
        with zipf.open(fname, 'w', force_zip64=True) as fid:
            format.write_array(fid, val,
                               allow_pickle=allow_pickle,
                               pickle_kwargs=pickle_kwargs)

    zipf.close()
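

# Editor's note: positional arrays are merged into `kwds` under the names
# 'arr_0', 'arr_1', ..., so mixing a positional array with an explicit
# keyword of the same name is rejected (the file name below is hypothetical):
#
#     np.savez("out.npz", np.zeros(3), arr_0=np.ones(3))
#     # ValueError: Cannot use un-named variables and keyword arr_0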


def _ensure_ndmin_ndarray_check_param(ndmin):
    """Check whether the `ndmin` parameter is supported by
    `_ensure_ndmin_ndarray`. Intended as cheap validation before
    running anything expensive, e.g. in loadtxt and genfromtxt.
    """
    # Check correctness of the values of `ndmin`
    if ndmin not in [0, 1, 2]:
        raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")


def _ensure_ndmin_ndarray(a, *, ndmin: int):
    """This is a helper function of loadtxt and genfromtxt to ensure
    proper minimum dimension as requested

    ndmin : int. Supported values 0, 1 or 2.
        ^^ whenever this changes, keep in sync with
           _ensure_ndmin_ndarray_check_param
    """
    # Verify that the array has at least dimensions `ndmin`.
    # Tweak the size and shape of the arrays - remove extraneous dimensions
    if a.ndim > ndmin:
        a = np.squeeze(a)
    # and ensure we have the minimum number of dimensions asked for
    # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0
    if a.ndim < ndmin:
        if ndmin == 1:
            a = np.atleast_1d(a)
        elif ndmin == 2:
            a = np.atleast_2d(a).T

    return a
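

# Editor's sketch of the resulting shapes: with ndmin=2 a 1-D result becomes
# a column vector, because np.atleast_2d prepends an axis giving (1, n) and
# the transpose above turns it into (n, 1).
#
#     a = np.array([1., 2., 3.])
#     _ensure_ndmin_ndarray(a, ndmin=1).shape   # -> (3,)
#     _ensure_ndmin_ndarray(a, ndmin=2).shape   # -> (3, 1)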


# amount of lines loadtxt reads in one chunk, can be overridden for testing
_loadtxt_chunksize = 50000


def _loadtxt_dispatcher(
        fname, dtype=None, comments=None, delimiter=None,
        converters=None, skiprows=None, usecols=None, unpack=None,
        ndmin=None, encoding=None, max_rows=None, *, like=None):
    return (like,)


def _check_nonneg_int(value, name="argument"):
    try:
        operator.index(value)
    except TypeError:
        raise TypeError(f"{name} must be an integer") from None
    if value < 0:
        raise ValueError(f"{name} must be nonnegative")


def _preprocess_comments(iterable, comments, encoding):
    """
    Generator that consumes an iterable of lines and strips out the
    multiple (or multi-character) comments from each line.
    This is a pre-processing step to achieve feature parity with loadtxt
    (we assume that this is a niche feature).
    """
    for line in iterable:
        if isinstance(line, bytes):
            # Need to handle conversion here, or the splitting would fail
            line = line.decode(encoding)

        for c in comments:
            line = line.split(c, 1)[0]

        yield line
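

# A quick illustration (editor's sketch): each comment marker truncates a
# line at its first occurrence, so both '#' and '//' act as comments here.
#
#     lines = ["1 2 # trailing", "3 4 // also stripped", "5 6"]
#     list(_preprocess_comments(lines, comments=("#", "//"), encoding=None))
#     # -> ['1 2 ', '3 4 ', '5 6']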


# The number of rows we read in one go if confronted with a parametric dtype
_loadtxt_chunksize = 50000


def _read(fname, *, delimiter=',', comment='#', quote='"',
          imaginary_unit='j', usecols=None, skiplines=0,
          max_rows=None, converters=None, ndmin=None, unpack=False,
          dtype=np.float64, encoding="bytes"):
  669. r"""
  670. Read a NumPy array from a text file.
  671. Parameters
  672. ----------
  673. fname : str or file object
  674. The filename or the file to be read.
  675. delimiter : str, optional
  676. Field delimiter of the fields in line of the file.
  677. Default is a comma, ','. If None any sequence of whitespace is
  678. considered a delimiter.
  679. comment : str or sequence of str or None, optional
  680. Character that begins a comment. All text from the comment
  681. character to the end of the line is ignored.
  682. Multiple comments or multiple-character comment strings are supported,
  683. but may be slower and `quote` must be empty if used.
  684. Use None to disable all use of comments.
  685. quote : str or None, optional
  686. Character that is used to quote string fields. Default is '"'
  687. (a double quote). Use None to disable quote support.
  688. imaginary_unit : str, optional
  689. Character that represent the imaginay unit `sqrt(-1)`.
  690. Default is 'j'.
  691. usecols : array_like, optional
  692. A one-dimensional array of integer column numbers. These are the
  693. columns from the file to be included in the array. If this value
  694. is not given, all the columns are used.
  695. skiplines : int, optional
  696. Number of lines to skip before interpreting the data in the file.
  697. max_rows : int, optional
  698. Maximum number of rows of data to read. Default is to read the
  699. entire file.
  700. converters : dict or callable, optional
  701. A function to parse all columns strings into the desired value, or
  702. a dictionary mapping column number to a parser function.
  703. E.g. if column 0 is a date string: ``converters = {0: datestr2num}``.
  704. Converters can also be used to provide a default value for missing
  705. data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will
  706. convert empty fields to 0.
  707. Default: None
  708. ndmin : int, optional
  709. Minimum dimension of the array returned.
  710. Allowed values are 0, 1 or 2. Default is 0.
  711. unpack : bool, optional
  712. If True, the returned array is transposed, so that arguments may be
  713. unpacked using ``x, y, z = read(...)``. When used with a structured
  714. data-type, arrays are returned for each field. Default is False.
  715. dtype : numpy data type
  716. A NumPy dtype instance, can be a structured dtype to map to the
  717. columns of the file.
  718. encoding : str, optional
  719. Encoding used to decode the inputfile. The special value 'bytes'
  720. (the default) enables backwards-compatible behavior for `converters`,
  721. ensuring that inputs to the converter functions are encoded
  722. bytes objects. The special value 'bytes' has no additional effect if
  723. ``converters=None``. If encoding is ``'bytes'`` or ``None``, the
  724. default system encoding is used.
  725. Returns
  726. -------
  727. ndarray
  728. NumPy array.
  729. Examples
  730. --------
  731. First we create a file for the example.
  732. >>> s1 = '1.0,2.0,3.0\n4.0,5.0,6.0\n'
  733. >>> with open('example1.csv', 'w') as f:
  734. ... f.write(s1)
  735. >>> a1 = read_from_filename('example1.csv')
  736. >>> a1
  737. array([[1., 2., 3.],
  738. [4., 5., 6.]])
  739. The second example has columns with different data types, so a
  740. one-dimensional array with a structured data type is returned.
  741. The tab character is used as the field delimiter.
  742. >>> s2 = '1.0\t10\talpha\n2.3\t25\tbeta\n4.5\t16\tgamma\n'
  743. >>> with open('example2.tsv', 'w') as f:
  744. ... f.write(s2)
  745. >>> a2 = read_from_filename('example2.tsv', delimiter='\t')
  746. >>> a2
  747. array([(1. , 10, b'alpha'), (2.3, 25, b'beta'), (4.5, 16, b'gamma')],
  748. dtype=[('f0', '<f8'), ('f1', 'u1'), ('f2', 'S5')])
  749. """
    # Handle special 'bytes' keyword for encoding
    byte_converters = False
    if encoding == 'bytes':
        encoding = None
        byte_converters = True

    if dtype is None:
        raise TypeError("a dtype must be provided.")
    dtype = np.dtype(dtype)

    read_dtype_via_object_chunks = None
    if dtype.kind in 'SUM' and (
            dtype == "S0" or dtype == "U0" or dtype == "M8" or dtype == 'm8'):
        # This is a legacy "flexible" dtype.  We do not truly support
        # parametric dtypes currently (no dtype discovery step in the core),
        # but have to support these for backward compatibility.
        read_dtype_via_object_chunks = dtype
        dtype = np.dtype(object)

    if usecols is not None:
        # Allow usecols to be a single int or a sequence of ints, the C-code
        # handles the rest
        try:
            usecols = list(usecols)
        except TypeError:
            usecols = [usecols]

    _ensure_ndmin_ndarray_check_param(ndmin)

    if comment is None:
        comments = None
    else:
        # assume comments are a sequence of strings
        if "" in comment:
            raise ValueError(
                "comments cannot be an empty string. Use comments=None to "
                "disable comments."
            )
        comments = tuple(comment)
        comment = None
        if len(comments) == 0:
            comments = None  # No comments at all
        elif len(comments) == 1:
            # If there is only one comment, and that comment has one
            # character, the normal parsing can deal with it just fine.
            if isinstance(comments[0], str) and len(comments[0]) == 1:
                comment = comments[0]
                comments = None
        else:
            # Input validation if there are multiple comment characters
            if delimiter in comments:
                raise TypeError(
                    f"Comment characters '{comments}' cannot include the "
                    f"delimiter '{delimiter}'"
                )

    # comment is now either a 1 or 0 character string or a tuple:
    if comments is not None:
        # Note: An earlier version supported two-character comments (and
        # could have been extended to multiple characters); we assume this
        # is rare enough not to optimize for.
        if quote is not None:
            raise ValueError(
                "when multiple comments or a multi-character comment is "
                "given, quotes are not supported.  In this case quotechar "
                "must be set to None.")

    if len(imaginary_unit) != 1:
        raise ValueError('len(imaginary_unit) must be 1.')

    _check_nonneg_int(skiplines)
    if max_rows is not None:
        _check_nonneg_int(max_rows)
    else:
        # Passing -1 to the C code means "read the entire file".
        max_rows = -1
    fh_closing_ctx = contextlib.nullcontext()
    filelike = False
    try:
        if isinstance(fname, os.PathLike):
            fname = os.fspath(fname)
        if isinstance(fname, str):
            fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
            if encoding is None:
                encoding = getattr(fh, 'encoding', 'latin1')
            fh_closing_ctx = contextlib.closing(fh)
            data = fh
            filelike = True
        else:
            if encoding is None:
                encoding = getattr(fname, 'encoding', 'latin1')
            data = iter(fname)
    except TypeError as e:
        raise ValueError(
            f"fname must be a string, filehandle, list of strings,\n"
            f"or generator. Got {type(fname)} instead.") from e
    with fh_closing_ctx:
        if comments is not None:
            if filelike:
                data = iter(data)
                filelike = False
            data = _preprocess_comments(data, comments, encoding)

        if read_dtype_via_object_chunks is None:
            arr = _load_from_filelike(
                data, delimiter=delimiter, comment=comment, quote=quote,
                imaginary_unit=imaginary_unit,
                usecols=usecols, skiplines=skiplines, max_rows=max_rows,
                converters=converters, dtype=dtype,
                encoding=encoding, filelike=filelike,
                byte_converters=byte_converters)
        else:
            # This branch reads the file into chunks of object arrays and
            # then casts them to the desired actual dtype.  This ensures
            # correct string-length and datetime-unit discovery (like
            # `arr.astype()`).  Due to chunking, certain error reports are
            # less clear, currently.
            if filelike:
                data = iter(data)  # cannot chunk when reading from file

            c_byte_converters = False
            if read_dtype_via_object_chunks == "S":
                c_byte_converters = True  # Use latin1 rather than ascii

            chunks = []
            while max_rows != 0:
                if max_rows < 0:
                    chunk_size = _loadtxt_chunksize
                else:
                    chunk_size = min(_loadtxt_chunksize, max_rows)

                next_arr = _load_from_filelike(
                    data, delimiter=delimiter, comment=comment, quote=quote,
                    imaginary_unit=imaginary_unit,
                    usecols=usecols, skiplines=skiplines, max_rows=max_rows,
                    converters=converters, dtype=dtype,
                    encoding=encoding, filelike=filelike,
                    byte_converters=byte_converters,
                    c_byte_converters=c_byte_converters)
                # Cast here already.  We hope that this is better even for
                # large files because the storage is more compact.  It could
                # be adapted (in principle the concatenate could cast).
                chunks.append(next_arr.astype(read_dtype_via_object_chunks))

                skiplines = 0  # Only have to skip for first chunk
                if max_rows >= 0:
                    max_rows -= chunk_size

                if len(next_arr) < chunk_size:
                    # There was less data than requested, so we are done.
                    break

            # Need at least one chunk, but if empty, the last one may have
            # the wrong shape.
            if len(chunks) > 1 and len(chunks[-1]) == 0:
                del chunks[-1]
            if len(chunks) == 1:
                arr = chunks[0]
            else:
                arr = np.concatenate(chunks, axis=0)

    # NOTE: ndmin works as advertised for structured dtypes, but normally
    #       these would return a 1D result plus the structured dimension,
    #       so ndmin=2 adds a third dimension even when no squeezing occurs.
    #       A `squeeze=False` could be a better solution (pandas uses squeeze).
    arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin)

    if arr.shape:
        if arr.shape[0] == 0:
            warnings.warn(
                f'loadtxt: input contained no data: "{fname}"',
                category=UserWarning,
                stacklevel=3
            )

    if unpack:
        # Unpack structured dtypes if requested:
        dt = arr.dtype
        if dt.names is not None:
            # For structured arrays, return an array for each field.
            return [arr[field] for field in dt.names]
        else:
            return arr.T
    else:
        return arr
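

# Editor's sketch: a "flexible" dtype such as ``dtype=str`` (i.e. "U0") takes
# the object-chunk branch above, so the final itemsize is discovered from the
# data, just as ``arr.astype("U")`` would discover it.
#
#     from io import StringIO
#     np.loadtxt(StringIO("apple pear\nfig kiwi"), dtype=str)
#     # -> array([['apple', 'pear'],
#     #           ['fig', 'kiwi']], dtype='<U5')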


@set_array_function_like_doc
@set_module('numpy')
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
            converters=None, skiprows=0, usecols=None, unpack=False,
            ndmin=0, encoding='bytes', max_rows=None, *, quotechar=None,
            like=None):
  922. r"""
  923. Load data from a text file.
  924. Parameters
  925. ----------
  926. fname : file, str, pathlib.Path, list of str, generator
  927. File, filename, list, or generator to read. If the filename
  928. extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
  929. that generators must return bytes or strings. The strings
  930. in a list or produced by a generator are treated as lines.
  931. dtype : data-type, optional
  932. Data-type of the resulting array; default: float. If this is a
  933. structured data-type, the resulting array will be 1-dimensional, and
  934. each row will be interpreted as an element of the array. In this
  935. case, the number of columns used must match the number of fields in
  936. the data-type.
  937. comments : str or sequence of str or None, optional
  938. The characters or list of characters used to indicate the start of a
  939. comment. None implies no comments. For backwards compatibility, byte
  940. strings will be decoded as 'latin1'. The default is '#'.
  941. delimiter : str, optional
  942. The character used to separate the values. For backwards compatibility,
  943. byte strings will be decoded as 'latin1'. The default is whitespace.
  944. .. versionchanged:: 1.23.0
  945. Only single character delimiters are supported. Newline characters
  946. cannot be used as the delimiter.
  947. converters : dict or callable, optional
  948. Converter functions to customize value parsing. If `converters` is
  949. callable, the function is applied to all columns, else it must be a
  950. dict that maps column number to a parser function.
  951. See examples for further details.
  952. Default: None.
  953. .. versionchanged:: 1.23.0
  954. The ability to pass a single callable to be applied to all columns
  955. was added.
  956. skiprows : int, optional
  957. Skip the first `skiprows` lines, including comments; default: 0.
  958. usecols : int or sequence, optional
  959. Which columns to read, with 0 being the first. For example,
  960. ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
  961. The default, None, results in all columns being read.
  962. .. versionchanged:: 1.11.0
  963. When a single column has to be read it is possible to use
  964. an integer instead of a tuple. E.g ``usecols = 3`` reads the
  965. fourth column the same way as ``usecols = (3,)`` would.
  966. unpack : bool, optional
  967. If True, the returned array is transposed, so that arguments may be
  968. unpacked using ``x, y, z = loadtxt(...)``. When used with a
  969. structured data-type, arrays are returned for each field.
  970. Default is False.
  971. ndmin : int, optional
  972. The returned array will have at least `ndmin` dimensions.
  973. Otherwise mono-dimensional axes will be squeezed.
  974. Legal values: 0 (default), 1 or 2.
  975. .. versionadded:: 1.6.0
  976. encoding : str, optional
  977. Encoding used to decode the inputfile. Does not apply to input streams.
  978. The special value 'bytes' enables backward compatibility workarounds
  979. that ensures you receive byte arrays as results if possible and passes
  980. 'latin1' encoded strings to converters. Override this value to receive
  981. unicode arrays and pass strings as input to converters. If set to None
  982. the system default is used. The default value is 'bytes'.
  983. .. versionadded:: 1.14.0
  984. max_rows : int, optional
  985. Read `max_rows` rows of content after `skiprows` lines. The default is
  986. to read all the rows. Note that empty rows containing no data such as
  987. empty lines and comment lines are not counted towards `max_rows`,
  988. while such lines are counted in `skiprows`.
  989. .. versionadded:: 1.16.0
  990. .. versionchanged:: 1.23.0
  991. Lines containing no data, including comment lines (e.g., lines
  992. starting with '#' or as specified via `comments`) are not counted
  993. towards `max_rows`.
  994. quotechar : unicode character or None, optional
  995. The character used to denote the start and end of a quoted item.
  996. Occurrences of the delimiter or comment characters are ignored within
  997. a quoted item. The default value is ``quotechar=None``, which means
  998. quoting support is disabled.
  999. If two consecutive instances of `quotechar` are found within a quoted
  1000. field, the first is treated as an escape character. See examples.
  1001. .. versionadded:: 1.23.0
  1002. ${ARRAY_FUNCTION_LIKE}
  1003. .. versionadded:: 1.20.0
  1004. Returns
  1005. -------
  1006. out : ndarray
  1007. Data read from the text file.
  1008. See Also
  1009. --------
  1010. load, fromstring, fromregex
  1011. genfromtxt : Load data with missing values handled as specified.
  1012. scipy.io.loadmat : reads MATLAB data files
  1013. Notes
  1014. -----
  1015. This function aims to be a fast reader for simply formatted files. The
  1016. `genfromtxt` function provides more sophisticated handling of, e.g.,
  1017. lines with missing values.
  1018. Each row in the input text file must have the same number of values to be
  1019. able to read all values. If all rows do not have same number of values, a
  1020. subset of up to n columns (where n is the least number of values present
  1021. in all rows) can be read by specifying the columns via `usecols`.
  1022. .. versionadded:: 1.10.0
  1023. The strings produced by the Python float.hex method can be used as
  1024. input for floats.
  1025. Examples
  1026. --------
  1027. >>> from io import StringIO # StringIO behaves like a file object
  1028. >>> c = StringIO("0 1\n2 3")
  1029. >>> np.loadtxt(c)
  1030. array([[0., 1.],
  1031. [2., 3.]])
  1032. >>> d = StringIO("M 21 72\nF 35 58")
  1033. >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
  1034. ... 'formats': ('S1', 'i4', 'f4')})
  1035. array([(b'M', 21, 72.), (b'F', 35, 58.)],
  1036. dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
  1037. >>> c = StringIO("1,0,2\n3,0,4")
  1038. >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
  1039. >>> x
  1040. array([1., 3.])
  1041. >>> y
  1042. array([2., 4.])
  1043. The `converters` argument is used to specify functions to preprocess the
  1044. text prior to parsing. `converters` can be a dictionary that maps
  1045. preprocessing functions to each column:
  1046. >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
  1047. >>> conv = {
  1048. ... 0: lambda x: np.floor(float(x)), # conversion fn for column 0
  1049. ... 1: lambda x: np.ceil(float(x)), # conversion fn for column 1
  1050. ... }
  1051. >>> np.loadtxt(s, delimiter=",", converters=conv)
  1052. array([[1., 3.],
  1053. [3., 5.]])
  1054. `converters` can be a callable instead of a dictionary, in which case it
  1055. is applied to all columns:
  1056. >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
  1057. >>> import functools
  1058. >>> conv = functools.partial(int, base=16)
  1059. >>> np.loadtxt(s, converters=conv)
  1060. array([[222., 173.],
  1061. [192., 222.]])
  1062. This example shows how `converters` can be used to convert a field
  1063. with a trailing minus sign into a negative number.
  1064. >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
  1065. >>> def conv(fld):
  1066. ... return -float(fld[:-1]) if fld.endswith(b'-') else float(fld)
  1067. ...
  1068. >>> np.loadtxt(s, converters=conv)
  1069. array([[ 10.01, -31.25],
  1070. [ 19.22, 64.31],
  1071. [-17.57, 63.94]])
  1072. Using a callable as the converter can be particularly useful for handling
  1073. values with different formatting, e.g. floats with underscores:
  1074. >>> s = StringIO("1 2.7 100_000")
  1075. >>> np.loadtxt(s, converters=float)
  1076. array([1.e+00, 2.7e+00, 1.e+05])
  1077. This idea can be extended to automatically handle values specified in
  1078. many different formats:
  1079. >>> def conv(val):
  1080. ... try:
  1081. ... return float(val)
  1082. ... except ValueError:
  1083. ... return float.fromhex(val)
  1084. >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
  1085. >>> np.loadtxt(s, delimiter=",", converters=conv, encoding=None)
  1086. array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])
  1087. Note that with the default ``encoding="bytes"``, the inputs to the
  1088. converter function are latin-1 encoded byte strings. To deactivate the
  1089. implicit encoding prior to conversion, use ``encoding=None``
  1090. >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
  1091. >>> conv = lambda x: -float(x[:-1]) if x.endswith('-') else float(x)
  1092. >>> np.loadtxt(s, converters=conv, encoding=None)
  1093. array([[ 10.01, -31.25],
  1094. [ 19.22, 64.31],
  1095. [-17.57, 63.94]])
  1096. Support for quoted fields is enabled with the `quotechar` parameter.
  1097. Comment and delimiter characters are ignored when they appear within a
  1098. quoted item delineated by `quotechar`:
  1099. >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')
  1100. >>> dtype = np.dtype([("label", "U12"), ("value", float)])
  1101. >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')
  1102. array([('alpha, #42', 10.), ('beta, #64', 2.)],
  1103. dtype=[('label', '<U12'), ('value', '<f8')])
  1104. Quoted fields can be separated by multiple whitespace characters:
  1105. >>> s = StringIO('"alpha, #42" 10.0\n"beta, #64" 2.0\n')
  1106. >>> dtype = np.dtype([("label", "U12"), ("value", float)])
  1107. >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')
  1108. array([('alpha, #42', 10.), ('beta, #64', 2.)],
  1109. dtype=[('label', '<U12'), ('value', '<f8')])
  1110. Two consecutive quote characters within a quoted field are treated as a
  1111. single escaped character:
  1112. >>> s = StringIO('"Hello, my name is ""Monty""!"')
  1113. >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
  1114. array('Hello, my name is "Monty"!', dtype='<U26')
  1115. Read subset of columns when all rows do not contain equal number of values:
  1116. >>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20")
  1117. >>> np.loadtxt(d, usecols=(0, 1))
  1118. array([[ 1., 2.],
  1119. [ 2., 4.],
  1120. [ 3., 9.],
  1121. [ 4., 16.]])
  1122. """
    if like is not None:
        return _loadtxt_with_like(
            fname, dtype=dtype, comments=comments, delimiter=delimiter,
            converters=converters, skiprows=skiprows, usecols=usecols,
            unpack=unpack, ndmin=ndmin, encoding=encoding,
            max_rows=max_rows, like=like
        )

    if dtype is None:
        dtype = np.float64

    comment = comments
    # Control character type conversions for Py3 convenience
    if comment is not None:
        if isinstance(comment, (str, bytes)):
            comment = [comment]
        comment = [
            x.decode('latin1') if isinstance(x, bytes) else x
            for x in comment]
    if isinstance(delimiter, bytes):
        delimiter = delimiter.decode('latin1')

    arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
                converters=converters, skiplines=skiprows, usecols=usecols,
                unpack=unpack, ndmin=ndmin, encoding=encoding,
                max_rows=max_rows, quote=quotechar)

    return arr


_loadtxt_with_like = array_function_dispatch(
    _loadtxt_dispatcher, use_like=True
)(loadtxt)


def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
                        header=None, footer=None, comments=None,
                        encoding=None):
    return (X,)


@array_function_dispatch(_savetxt_dispatcher)
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
            footer='', comments='# ', encoding=None):
  1158. """
  1159. Save an array to a text file.
  1160. Parameters
  1161. ----------
  1162. fname : filename or file handle
  1163. If the filename ends in ``.gz``, the file is automatically saved in
  1164. compressed gzip format. `loadtxt` understands gzipped files
  1165. transparently.
  1166. X : 1D or 2D array_like
  1167. Data to be saved to a text file.
  1168. fmt : str or sequence of strs, optional
  1169. A single format (%10.5f), a sequence of formats, or a
  1170. multi-format string, e.g. 'Iteration %d -- %10.5f', in which
  1171. case `delimiter` is ignored. For complex `X`, the legal options
  1172. for `fmt` are:
  1173. * a single specifier, `fmt='%.4e'`, resulting in numbers formatted
  1174. like `' (%s+%sj)' % (fmt, fmt)`
  1175. * a full string specifying every real and imaginary part, e.g.
  1176. `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
  1177. * a list of specifiers, one per column - in this case, the real
  1178. and imaginary part must have separate specifiers,
  1179. e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
  1180. delimiter : str, optional
  1181. String or character separating columns.
  1182. newline : str, optional
  1183. String or character separating lines.
  1184. .. versionadded:: 1.5.0
  1185. header : str, optional
  1186. String that will be written at the beginning of the file.
  1187. .. versionadded:: 1.7.0
  1188. footer : str, optional
  1189. String that will be written at the end of the file.
  1190. .. versionadded:: 1.7.0
  1191. comments : str, optional
  1192. String that will be prepended to the ``header`` and ``footer`` strings,
  1193. to mark them as comments. Default: '# ', as expected by e.g.
  1194. ``numpy.loadtxt``.
  1195. .. versionadded:: 1.7.0
  1196. encoding : {None, str}, optional
  1197. Encoding used to encode the outputfile. Does not apply to output
  1198. streams. If the encoding is something other than 'bytes' or 'latin1'
  1199. you will not be able to load the file in NumPy versions < 1.14. Default
  1200. is 'latin1'.
  1201. .. versionadded:: 1.14.0
  1202. See Also
  1203. --------
  1204. save : Save an array to a binary file in NumPy ``.npy`` format
  1205. savez : Save several arrays into an uncompressed ``.npz`` archive
  1206. savez_compressed : Save several arrays into a compressed ``.npz`` archive
  1207. Notes
  1208. -----
  1209. Further explanation of the `fmt` parameter
  1210. (``%[flag]width[.precision]specifier``):
  1211. flags:
  1212. ``-`` : left justify
  1213. ``+`` : Forces to precede result with + or -.
  1214. ``0`` : Left pad the number with zeros instead of space (see width).
  1215. width:
  1216. Minimum number of characters to be printed. The value is not truncated
  1217. if it has more characters.
  1218. precision:
  1219. - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
  1220. digits.
  1221. - For ``e, E`` and ``f`` specifiers, the number of digits to print
  1222. after the decimal point.
  1223. - For ``g`` and ``G``, the maximum number of significant digits.
  1224. - For ``s``, the maximum number of characters.
  1225. specifiers:
  1226. ``c`` : character
  1227. ``d`` or ``i`` : signed decimal integer
  1228. ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
  1229. ``f`` : decimal floating point
  1230. ``g,G`` : use the shorter of ``e,E`` or ``f``
  1231. ``o`` : signed octal
  1232. ``s`` : string of characters
  1233. ``u`` : unsigned decimal integer
  1234. ``x,X`` : unsigned hexadecimal integer
  1235. This explanation of ``fmt`` is not complete, for an exhaustive
  1236. specification see [1]_.
  1237. References
  1238. ----------
  1239. .. [1] `Format Specification Mini-Language
  1240. <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
  1241. Python Documentation.
  1242. Examples
  1243. --------
  1244. >>> x = y = z = np.arange(0.0,5.0,1.0)
  1245. >>> np.savetxt('test.out', x, delimiter=',') # X is an array
  1246. >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
  1247. >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
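
    For complex `X`, a single specifier is expanded to the
    ``' (%s+%sj)' % (fmt, fmt)`` template described above (a minimal
    sketch; 'complex.out' is just an illustrative file name):

    >>> z = np.array([1.0 + 2.0j, 3.0 - 4.0j])
    >>> np.savetxt('complex.out', z, fmt='%.2e')
    >>> # each line now reads like " (1.00e+00+2.00e+00j)"

    A header is prefixed with the `comments` string, so that `loadtxt` will
    skip it when reading the file back (a sketch, reusing `x` from above):

    >>> np.savetxt('test.out', x, header='x values')   # first line: "# x values"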
  1248. """
    # Py3 conversions first
    if isinstance(fmt, bytes):
        fmt = asstr(fmt)
    delimiter = asstr(delimiter)

    class WriteWrap:
        """Convert to bytes on bytestream inputs."""

        def __init__(self, fh, encoding):
            self.fh = fh
            self.encoding = encoding
            self.do_write = self.first_write

        def close(self):
            self.fh.close()

        def write(self, v):
            self.do_write(v)

        def write_bytes(self, v):
            if isinstance(v, bytes):
                self.fh.write(v)
            else:
                self.fh.write(v.encode(self.encoding))

        def write_normal(self, v):
            self.fh.write(asunicode(v))

        def first_write(self, v):
            try:
                self.write_normal(v)
                self.write = self.write_normal
            except TypeError:
                # input is probably a bytestream
                self.write_bytes(v)
                self.write = self.write_bytes

    own_fh = False
    if isinstance(fname, os_PathLike):
        fname = os_fspath(fname)
    if _is_string_like(fname):
        # datasource doesn't support creating a new file ...
        open(fname, 'wt').close()
        fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
        own_fh = True
    elif hasattr(fname, 'write'):
        # wrap to handle byte output streams
        fh = WriteWrap(fname, encoding or 'latin1')
    else:
        raise ValueError('fname must be a string or file handle')

    try:
        X = np.asarray(X)

        # Handle 1-dimensional arrays
        if X.ndim == 0 or X.ndim > 2:
            raise ValueError(
                "Expected 1D or 2D array, got %dD array instead" % X.ndim)
        elif X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1
            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.names)
        else:
            ncol = X.shape[1]

        iscomplex_X = np.iscomplexobj(X)
        # `fmt` can be a string with multiple insertion points or a
        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
        if type(fmt) in (list, tuple):
            if len(fmt) != ncol:
                raise AttributeError('fmt has wrong shape.  %s' % str(fmt))
            format = asstr(delimiter).join(map(asstr, fmt))
        elif isinstance(fmt, str):
            n_fmt_chars = fmt.count('%')
            error = ValueError('fmt has wrong number of %% formats:  %s' % fmt)
            if n_fmt_chars == 1:
                if iscomplex_X:
                    fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
                else:
                    fmt = [fmt, ] * ncol
                format = delimiter.join(fmt)
            elif iscomplex_X and n_fmt_chars != (2 * ncol):
                raise error
            elif ((not iscomplex_X) and n_fmt_chars != ncol):
                raise error
            else:
                format = fmt
        else:
            raise ValueError('invalid fmt: %r' % (fmt,))

        if len(header) > 0:
            header = header.replace('\n', '\n' + comments)
            fh.write(comments + header + newline)
        if iscomplex_X:
            for row in X:
                row2 = []
                for number in row:
                    row2.append(number.real)
                    row2.append(number.imag)
                s = format % tuple(row2) + newline
                fh.write(s.replace('+-', '-'))
        else:
            for row in X:
                try:
                    v = format % tuple(row) + newline
                except TypeError as e:
                    raise TypeError("Mismatch between array dtype ('%s') and "
                                    "format specifier ('%s')"
                                    % (str(X.dtype), format)) from e
                fh.write(v)

        if len(footer) > 0:
            footer = footer.replace('\n', '\n' + comments)
            fh.write(comments + footer + newline)
    finally:
        if own_fh:
            fh.close()


@set_module('numpy')
def fromregex(file, regexp, dtype, encoding=None):
    r"""
    Construct an array from a text file, using regular expression parsing.

    The returned array is always a structured array, and is constructed
    from all matches of the regular expression in the file. Groups in the
    regular expression are converted to fields of the structured array.

    Parameters
    ----------
    file : path or file
        Filename or file object to read.

        .. versionchanged:: 1.22.0
            Now accepts `os.PathLike` implementations.
    regexp : str or regexp
        Regular expression used to parse the file.
        Groups in the regular expression correspond to fields in the dtype.
    dtype : dtype or list of dtypes
        Dtype for the structured array; must be a structured datatype.
    encoding : str, optional
        Encoding used to decode the input file. Does not apply to input
        streams.

        .. versionadded:: 1.14.0

    Returns
    -------
    output : ndarray
        The output array, containing the part of the content of `file` that
        was matched by `regexp`. `output` is always a structured array.

    Raises
    ------
    TypeError
        When `dtype` is not a valid dtype for a structured array.

    See Also
    --------
    fromstring, loadtxt

    Notes
    -----
    Dtypes for structured arrays can be specified in several forms, but all
    forms specify at least the data type and field name. For details see
    `basics.rec`.

    Examples
    --------
    >>> from io import StringIO
    >>> text = StringIO("1312 foo\n1534 bar\n444 qux")

    >>> regexp = r"(\d+)\s+(...)"  # match [digits, whitespace, anything]
    >>> output = np.fromregex(text, regexp,
    ...                       [('num', np.int64), ('key', 'S3')])
    >>> output
    array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
          dtype=[('num', '<i8'), ('key', 'S3')])
    >>> output['num']
    array([1312, 1534,  444])
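
    A regular expression with a single group is also accepted; the result
    is still a structured array, here with a single field (a minimal sketch
    of the single-group code path):

    >>> _ = text.seek(0)
    >>> np.fromregex(text, r"(\d+)\s+", [('num', np.int64)])
    array([(1312,), (1534,), ( 444,)], dtype=[('num', '<i8')])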
  1408. """
    own_fh = False
    if not hasattr(file, "read"):
        file = os.fspath(file)
        file = np.lib._datasource.open(file, 'rt', encoding=encoding)
        own_fh = True

    try:
        if not isinstance(dtype, np.dtype):
            dtype = np.dtype(dtype)
        if dtype.names is None:
            raise TypeError('dtype must be a structured datatype.')

        content = file.read()
        if isinstance(content, bytes) and isinstance(regexp, str):
            regexp = asbytes(regexp)
        elif isinstance(content, str) and isinstance(regexp, bytes):
            regexp = asstr(regexp)

        if not hasattr(regexp, 'match'):
            regexp = re.compile(regexp)
        seq = regexp.findall(content)
        if seq and not isinstance(seq[0], tuple):
            # Only one group is in the regexp.
            # Create the new array as a single data-type and then
            #   re-interpret as a single-field structured array.
            newdtype = np.dtype(dtype[dtype.names[0]])
            output = np.array(seq, dtype=newdtype)
            output.dtype = dtype
        else:
            output = np.array(seq, dtype=dtype)

        return output
    finally:
        if own_fh:
            file.close()


#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------


def _genfromtxt_dispatcher(fname, dtype=None, comments=None, delimiter=None,
                           skip_header=None, skip_footer=None, converters=None,
                           missing_values=None, filling_values=None,
                           usecols=None, names=None, excludelist=None,
                           deletechars=None, replace_space=None,
                           autostrip=None, case_sensitive=None,
                           defaultfmt=None, unpack=None, usemask=None,
                           loose=None, invalid_raise=None, max_rows=None,
                           encoding=None, *, ndmin=None, like=None):
    return (like,)


@set_array_function_like_doc
@set_module('numpy')
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
               skip_header=0, skip_footer=0, converters=None,
               missing_values=None, filling_values=None, usecols=None,
               names=None, excludelist=None,
               deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
               replace_space='_', autostrip=False, case_sensitive=True,
               defaultfmt="f%i", unpack=None, usemask=False, loose=True,
               invalid_raise=True, max_rows=None, encoding='bytes',
               *, ndmin=0, like=None):
  1463. """
  1464. Load data from a text file, with missing values handled as specified.
  1465. Each line past the first `skip_header` lines is split at the `delimiter`
  1466. character, and characters following the `comments` character are discarded.
  1467. Parameters
  1468. ----------
  1469. fname : file, str, pathlib.Path, list of str, generator
  1470. File, filename, list, or generator to read. If the filename
  1471. extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
  1472. that generators must return bytes or strings. The strings
  1473. in a list or produced by a generator are treated as lines.
  1474. dtype : dtype, optional
  1475. Data type of the resulting array.
  1476. If None, the dtypes will be determined by the contents of each
  1477. column, individually.
  1478. comments : str, optional
  1479. The character used to indicate the start of a comment.
  1480. All the characters occurring on a line after a comment are discarded.
  1481. delimiter : str, int, or sequence, optional
  1482. The string used to separate values. By default, any consecutive
  1483. whitespaces act as delimiter. An integer or sequence of integers
  1484. can also be provided as width(s) of each field.
  1485. skiprows : int, optional
  1486. `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
  1487. skip_header : int, optional
  1488. The number of lines to skip at the beginning of the file.
  1489. skip_footer : int, optional
  1490. The number of lines to skip at the end of the file.
  1491. converters : variable, optional
  1492. The set of functions that convert the data of a column to a value.
  1493. The converters can also be used to provide a default value
  1494. for missing data: ``converters = {3: lambda s: float(s or 0)}``.
  1495. missing : variable, optional
  1496. `missing` was removed in numpy 1.10. Please use `missing_values`
  1497. instead.
  1498. missing_values : variable, optional
  1499. The set of strings corresponding to missing data.
  1500. filling_values : variable, optional
  1501. The set of values to be used as default when the data are missing.
  1502. usecols : sequence, optional
  1503. Which columns to read, with 0 being the first. For example,
  1504. ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
  1505. names : {None, True, str, sequence}, optional
  1506. If `names` is True, the field names are read from the first line after
  1507. the first `skip_header` lines. This line can optionally be preceded
  1508. by a comment delimiter. If `names` is a sequence or a single-string of
  1509. comma-separated names, the names will be used to define the field names
  1510. in a structured dtype. If `names` is None, the names of the dtype
  1511. fields will be used, if any.
  1512. excludelist : sequence, optional
  1513. A list of names to exclude. This list is appended to the default list
  1514. ['return','file','print']. Excluded names are appended with an
  1515. underscore: for example, `file` would become `file_`.
  1516. deletechars : str, optional
  1517. A string combining invalid characters that must be deleted from the
  1518. names.
  1519. defaultfmt : str, optional
  1520. A format used to define default field names, such as "f%i" or "f_%02i".
  1521. autostrip : bool, optional
  1522. Whether to automatically strip white spaces from the variables.
  1523. replace_space : char, optional
  1524. Character(s) used in replacement of white spaces in the variable
  1525. names. By default, use a '_'.
  1526. case_sensitive : {True, False, 'upper', 'lower'}, optional
  1527. If True, field names are case sensitive.
  1528. If False or 'upper', field names are converted to upper case.
  1529. If 'lower', field names are converted to lower case.
  1530. unpack : bool, optional
  1531. If True, the returned array is transposed, so that arguments may be
  1532. unpacked using ``x, y, z = genfromtxt(...)``. When used with a
  1533. structured data-type, arrays are returned for each field.
  1534. Default is False.
  1535. usemask : bool, optional
  1536. If True, return a masked array.
  1537. If False, return a regular array.
  1538. loose : bool, optional
  1539. If True, do not raise errors for invalid values.
  1540. invalid_raise : bool, optional
  1541. If True, an exception is raised if an inconsistency is detected in the
  1542. number of columns.
  1543. If False, a warning is emitted and the offending lines are skipped.
  1544. max_rows : int, optional
  1545. The maximum number of rows to read. Must not be used with skip_footer
  1546. at the same time. If given, the value must be at least 1. Default is
  1547. to read the entire file.
  1548. .. versionadded:: 1.10.0
  1549. encoding : str, optional
  1550. Encoding used to decode the inputfile. Does not apply when `fname` is
  1551. a file object. The special value 'bytes' enables backward compatibility
  1552. workarounds that ensure that you receive byte arrays when possible
  1553. and passes latin1 encoded strings to converters. Override this value to
  1554. receive unicode arrays and pass strings as input to converters. If set
  1555. to None the system default is used. The default value is 'bytes'.
  1556. .. versionadded:: 1.14.0
  1557. ndmin : int, optional
  1558. Same parameter as `loadtxt`
  1559. .. versionadded:: 1.23.0
  1560. ${ARRAY_FUNCTION_LIKE}
  1561. .. versionadded:: 1.20.0
  1562. Returns
  1563. -------
  1564. out : ndarray
  1565. Data read from the text file. If `usemask` is True, this is a
  1566. masked array.
  1567. See Also
  1568. --------
  1569. numpy.loadtxt : equivalent function when no data is missing.
  1570. Notes
  1571. -----
  1572. * When spaces are used as delimiters, or when no delimiter has been given
  1573. as input, there should not be any missing data between two fields.
  1574. * When the variables are named (either by a flexible dtype or with `names`),
  1575. there must not be any header in the file (else a ValueError
  1576. exception is raised).
  1577. * Individual values are not stripped of spaces by default.
  1578. When using a custom converter, make sure the function does remove spaces.
  1579. References
  1580. ----------
  1581. .. [1] NumPy User Guide, section `I/O with NumPy
  1582. <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
  1583. Examples
  1584. --------
  1585. >>> from io import StringIO
  1586. >>> import numpy as np
  1587. Comma delimited file with mixed dtype
  1588. >>> s = StringIO(u"1,1.3,abcde")
  1589. >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
  1590. ... ('mystring','S5')], delimiter=",")
  1591. >>> data
  1592. array((1, 1.3, b'abcde'),
  1593. dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
  1594. Using dtype = None
  1595. >>> _ = s.seek(0) # needed for StringIO example only
  1596. >>> data = np.genfromtxt(s, dtype=None,
  1597. ... names = ['myint','myfloat','mystring'], delimiter=",")
  1598. >>> data
  1599. array((1, 1.3, b'abcde'),
  1600. dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
  1601. Specifying dtype and names
  1602. >>> _ = s.seek(0)
  1603. >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
  1604. ... names=['myint','myfloat','mystring'], delimiter=",")
  1605. >>> data
  1606. array((1, 1.3, b'abcde'),
  1607. dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
  1608. An example with fixed-width columns
  1609. >>> s = StringIO(u"11.3abcde")
  1610. >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
  1611. ... delimiter=[1,3,5])
  1612. >>> data
  1613. array((1, 1.3, b'abcde'),
  1614. dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
  1615. An example to show comments
  1616. >>> f = StringIO('''
  1617. ... text,# of chars
  1618. ... hello world,11
  1619. ... numpy,5''')
  1620. >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
  1621. array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
  1622. dtype=[('f0', 'S12'), ('f1', 'S12')])
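
    An example showing how missing data can be filled via `filling_values`
    (a minimal sketch: with the default ``dtype=float``, the empty field in
    the first row is replaced by the fill value 99):

    >>> f = StringIO('''1,,3
    ... 4,5,6''')
    >>> np.genfromtxt(f, delimiter=",", filling_values=99)
    array([[ 1., 99.,  3.],
           [ 4.,  5.,  6.]])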
  1623. """
    if like is not None:
        return _genfromtxt_with_like(
            fname, dtype=dtype, comments=comments, delimiter=delimiter,
            skip_header=skip_header, skip_footer=skip_footer,
            converters=converters, missing_values=missing_values,
            filling_values=filling_values, usecols=usecols, names=names,
            excludelist=excludelist, deletechars=deletechars,
            replace_space=replace_space, autostrip=autostrip,
            case_sensitive=case_sensitive, defaultfmt=defaultfmt,
            unpack=unpack, usemask=usemask, loose=loose,
            invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
            ndmin=ndmin, like=like
        )

    _ensure_ndmin_ndarray_check_param(ndmin)

    if max_rows is not None:
        if skip_footer:
            raise ValueError(
                "The keywords 'skip_footer' and 'max_rows' can not be "
                "specified at the same time.")
        if max_rows < 1:
            raise ValueError("'max_rows' must be at least 1.")

    if usemask:
        from numpy.ma import MaskedArray, make_mask_descr

    # Check the input dictionary of converters
    user_converters = converters or {}
    if not isinstance(user_converters, dict):
        raise TypeError(
            "The input argument 'converters' should be a valid dictionary "
            "(got '%s' instead)" % type(user_converters))

    if encoding == 'bytes':
        encoding = None
        byte_converters = True
    else:
        byte_converters = False
    # Initialize the filehandle, the LineSplitter and the NameValidator
    if isinstance(fname, os_PathLike):
        fname = os_fspath(fname)
    if isinstance(fname, str):
        fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
        fid_ctx = contextlib.closing(fid)
    else:
        fid = fname
        fid_ctx = contextlib.nullcontext(fid)
    try:
        fhd = iter(fid)
    except TypeError as e:
        raise TypeError(
            "fname must be a string, a filehandle, a sequence of strings,\n"
            f"or an iterator of strings. Got {type(fname)} instead."
        ) from e
    with fid_ctx:
        split_line = LineSplitter(delimiter=delimiter, comments=comments,
                                  autostrip=autostrip, encoding=encoding)
        validate_names = NameValidator(excludelist=excludelist,
                                       deletechars=deletechars,
                                       case_sensitive=case_sensitive,
                                       replace_space=replace_space)

        # Skip the first `skip_header` rows
        try:
            for i in range(skip_header):
                next(fhd)

            # Keep on until we find the first valid values
            first_values = None

            while not first_values:
                first_line = _decode_line(next(fhd), encoding)
                if (names is True) and (comments is not None):
                    if comments in first_line:
                        first_line = (
                            ''.join(first_line.split(comments)[1:]))
                first_values = split_line(first_line)
        except StopIteration:
            # return an empty array if the datafile is empty
            first_line = ''
            first_values = []
            warnings.warn(
                'genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)

        # Should we take the first values as names?
        if names is True:
            fval = first_values[0].strip()
            if comments is not None:
                if fval in comments:
                    del first_values[0]

        # Check the columns to use: make sure `usecols` is a list
        if usecols is not None:
            try:
                usecols = [_.strip() for _ in usecols.split(",")]
            except AttributeError:
                try:
                    usecols = list(usecols)
                except TypeError:
                    usecols = [usecols, ]
        nbcols = len(usecols or first_values)

        # Check the names and overwrite the dtype.names if needed
        if names is True:
            names = validate_names([str(_.strip()) for _ in first_values])
            first_line = ''
        elif _is_string_like(names):
            names = validate_names([_.strip() for _ in names.split(',')])
        elif names:
            names = validate_names(names)
        # Get the dtype
        if dtype is not None:
            dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
                               excludelist=excludelist,
                               deletechars=deletechars,
                               case_sensitive=case_sensitive,
                               replace_space=replace_space)
        # Make sure `names` is a list
        if names is not None:
            names = list(names)

        if usecols:
            for (i, current) in enumerate(usecols):
                # if usecols is a list of names, convert to a list of indices
                if _is_string_like(current):
                    usecols[i] = names.index(current)
                elif current < 0:
                    usecols[i] = current + len(first_values)
            # If the dtype is not None, make sure we update it
            if (dtype is not None) and (len(dtype) > nbcols):
                descr = dtype.descr
                dtype = np.dtype([descr[_] for _ in usecols])
                names = list(dtype.names)
            # If `names` is not None, update the names
            elif (names is not None) and (len(names) > nbcols):
                names = [names[_] for _ in usecols]
        elif (names is not None) and (dtype is not None):
            names = list(dtype.names)
        # Process the missing values ...............................
        # Rename missing_values for convenience
        user_missing_values = missing_values or ()
        if isinstance(user_missing_values, bytes):
            user_missing_values = user_missing_values.decode('latin1')

        # Define the list of missing_values (one column: one list)
        missing_values = [list(['']) for _ in range(nbcols)]

        # We have a dictionary: process it field by field
        if isinstance(user_missing_values, dict):
            # Loop on the items
            for (key, val) in user_missing_values.items():
                # Is the key a string?
                if _is_string_like(key):
                    try:
                        # Transform it into an integer
                        key = names.index(key)
                    except ValueError:
                        # We couldn't find it: the name must have been dropped
                        continue
                # Redefine the key as needed if it's a column number
                if usecols:
                    try:
                        key = usecols.index(key)
                    except ValueError:
                        pass
                # Transform the value into a list of strings
                if isinstance(val, (list, tuple)):
                    val = [str(_) for _ in val]
                else:
                    val = [str(val), ]
                # Add the value(s) to the current list of missing
                if key is None:
                    # None acts as default
                    for miss in missing_values:
                        miss.extend(val)
                else:
                    missing_values[key].extend(val)
        # We have a sequence: each item matches a column
        elif isinstance(user_missing_values, (list, tuple)):
            for (value, entry) in zip(user_missing_values, missing_values):
                value = str(value)
                if value not in entry:
                    entry.append(value)
        # We have a string: apply it to all entries
        elif isinstance(user_missing_values, str):
            user_value = user_missing_values.split(",")
            for entry in missing_values:
                entry.extend(user_value)
        # We have something else: apply it to all entries
        else:
            for entry in missing_values:
                entry.extend([str(user_missing_values)])
        # Process the filling_values ...............................
        # Rename the input for convenience
        user_filling_values = filling_values
        if user_filling_values is None:
            user_filling_values = []
        # Define the default
        filling_values = [None] * nbcols
        # We have a dictionary: update each entry individually
        if isinstance(user_filling_values, dict):
            for (key, val) in user_filling_values.items():
                if _is_string_like(key):
                    try:
                        # Transform it into an integer
                        key = names.index(key)
                    except ValueError:
                        # We couldn't find it: the name must have been dropped
                        continue
                # Redefine the key if it's a column number and usecols
                # is defined
                if usecols:
                    try:
                        key = usecols.index(key)
                    except ValueError:
                        pass
                # Add the value to the list
                filling_values[key] = val
        # We have a sequence: update on a one-to-one basis
        elif isinstance(user_filling_values, (list, tuple)):
            n = len(user_filling_values)
            if (n <= nbcols):
                filling_values[:n] = user_filling_values
            else:
                filling_values = user_filling_values[:nbcols]
        # We have something else: use it for all entries
        else:
            filling_values = [user_filling_values] * nbcols
        # Initialize the converters ................................
        if dtype is None:
            # Note: we can't use a [...]*nbcols, as we would have 3 times
            # the same converter, instead of 3 different converters.
            converters = [StringConverter(None, missing_values=miss,
                                          default=fill)
                          for (miss, fill) in zip(missing_values,
                                                  filling_values)]
        else:
            dtype_flat = flatten_dtype(dtype, flatten_base=True)
            # Initialize the converters
            if len(dtype_flat) > 1:
                # Flexible type: get a converter from each dtype
                zipit = zip(dtype_flat, missing_values, filling_values)
                converters = [StringConverter(dt, locked=True,
                                              missing_values=miss,
                                              default=fill)
                              for (dt, miss, fill) in zipit]
            else:
                # Set to a default converter (but w/ different missing values)
                zipit = zip(missing_values, filling_values)
                converters = [StringConverter(dtype, locked=True,
                                              missing_values=miss,
                                              default=fill)
                              for (miss, fill) in zipit]
        # Update the converters to use the user-defined ones
        uc_update = []
        for (j, conv) in user_converters.items():
            # If the converter is specified by column names,
            # use the index instead
            if _is_string_like(j):
                try:
                    j = names.index(j)
                    i = j
                except ValueError:
                    continue
            elif usecols:
                try:
                    i = usecols.index(j)
                except ValueError:
                    # Unused converter specified
                    continue
            else:
                i = j
            # Find the value to test - first_line is not filtered by usecols:
            if len(first_line):
                testing_value = first_values[j]
            else:
                testing_value = None
            if conv is bytes:
                user_conv = asbytes
            elif byte_converters:
                # Converters may use decode to work around numpy's old
                # behaviour, so encode the string again before passing it
                # to the user converter.
                def tobytes_first(x, conv):
                    if type(x) is bytes:
                        return conv(x)
                    return conv(x.encode("latin1"))
                user_conv = functools.partial(tobytes_first, conv=conv)
            else:
                user_conv = conv
            converters[i].update(user_conv, locked=True,
                                 testing_value=testing_value,
                                 default=filling_values[i],
                                 missing_values=missing_values[i],)
            uc_update.append((i, user_conv))
        # Make sure we have the corrected keys in user_converters...
        user_converters.update(uc_update)

        # Fixme: possible error as following variable never used.
        # miss_chars = [_.missing_values for _ in converters]
        # Initialize the output lists ...
        # ... rows
        rows = []
        append_to_rows = rows.append
        # ... masks
        if usemask:
            masks = []
            append_to_masks = masks.append
        # ... invalid
        invalid = []
        append_to_invalid = invalid.append

        # Parse each line
        for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
            values = split_line(line)
            nbvalues = len(values)
            # Skip an empty line
            if nbvalues == 0:
                continue
            if usecols:
                # Select only the columns we need
                try:
                    values = [values[_] for _ in usecols]
                except IndexError:
                    append_to_invalid((i + skip_header + 1, nbvalues))
                    continue
            elif nbvalues != nbcols:
                append_to_invalid((i + skip_header + 1, nbvalues))
                continue
            # Store the values
            append_to_rows(tuple(values))
            if usemask:
                append_to_masks(tuple([v.strip() in m
                                       for (v, m) in zip(values,
                                                         missing_values)]))
            if len(rows) == max_rows:
                break
    # Upgrade the converters (if needed)
    if dtype is None:
        for (i, converter) in enumerate(converters):
            current_column = [itemgetter(i)(_m) for _m in rows]
            try:
                converter.iterupgrade(current_column)
            except ConverterLockError:
                errmsg = "Converter #%i is locked and cannot be upgraded: " % i
                current_column = map(itemgetter(i), rows)
                for (j, value) in enumerate(current_column):
                    try:
                        converter.upgrade(value)
                    except (ConverterError, ValueError):
                        errmsg += "(occurred line #%i for value '%s')"
                        errmsg %= (j + 1 + skip_header, value)
                        raise ConverterError(errmsg)
    # Check that we don't have invalid values
    nbinvalid = len(invalid)
    if nbinvalid > 0:
        nbrows = len(rows) + nbinvalid - skip_footer
        # Construct the error message
        template = "    Line #%%i (got %%i columns instead of %i)" % nbcols
        if skip_footer > 0:
            nbinvalid_skipped = len([_ for _ in invalid
                                     if _[0] > nbrows + skip_header])
            invalid = invalid[:nbinvalid - nbinvalid_skipped]
            skip_footer -= nbinvalid_skipped
#
#            nbrows -= skip_footer
#            errmsg = [template % (i, nb)
#                      for (i, nb) in invalid if i < nbrows]
#        else:
        errmsg = [template % (i, nb)
                  for (i, nb) in invalid]
        if len(errmsg):
            errmsg.insert(0, "Some errors were detected!")
            errmsg = "\n".join(errmsg)
            # Raise an exception?
            if invalid_raise:
                raise ValueError(errmsg)
            # Issue a warning?
            else:
                warnings.warn(errmsg, ConversionWarning, stacklevel=2)

    # Strip the last skip_footer data
    if skip_footer > 0:
        rows = rows[:-skip_footer]
        if usemask:
            masks = masks[:-skip_footer]
    # Convert each value according to the converter:
    # We want to modify the list in place to avoid creating a new one...
    if loose:
        rows = list(
            zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
                  for (i, conv) in enumerate(converters)]))
    else:
        rows = list(
            zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
                  for (i, conv) in enumerate(converters)]))

    # Reset the dtype
    data = rows
    if dtype is None:
        # Get the dtypes from the types of the converters
        column_types = [conv.type for conv in converters]
        # Find the columns with strings...
        strcolidx = [i for (i, v) in enumerate(column_types)
                     if v == np.unicode_]

        if byte_converters and strcolidx:
            # convert strings back to bytes for backward compatibility
            warnings.warn(
                "Reading unicode strings without specifying the encoding "
                "argument is deprecated. Set the encoding, use None for the "
                "system default.",
                np.VisibleDeprecationWarning, stacklevel=2)

            def encode_unicode_cols(row_tup):
                row = list(row_tup)
                for i in strcolidx:
                    row[i] = row[i].encode('latin1')
                return tuple(row)

            try:
                data = [encode_unicode_cols(r) for r in data]
            except UnicodeEncodeError:
                pass
            else:
                for i in strcolidx:
                    column_types[i] = np.bytes_

        # Update string types to be the right length
        sized_column_types = column_types[:]
        for i, col_type in enumerate(column_types):
            if np.issubdtype(col_type, np.character):
                n_chars = max(len(row[i]) for row in data)
                sized_column_types[i] = (col_type, n_chars)

        if names is None:
            # If the dtype is uniform (before sizing strings)
            base = {
                c_type
                for c, c_type in zip(converters, column_types)
                if c._checked}
            if len(base) == 1:
                uniform_type, = base
                (ddtype, mdtype) = (uniform_type, bool)
            else:
                ddtype = [(defaultfmt % i, dt)
                          for (i, dt) in enumerate(sized_column_types)]
                if usemask:
                    mdtype = [(defaultfmt % i, bool)
                              for (i, dt) in enumerate(sized_column_types)]
        else:
            ddtype = list(zip(names, sized_column_types))
            mdtype = list(zip(names, [bool] * len(sized_column_types)))
        output = np.array(data, dtype=ddtype)
        if usemask:
            outputmask = np.array(masks, dtype=mdtype)
    else:
        # Overwrite the initial dtype names if needed
        if names and dtype.names is not None:
            dtype.names = names
        # Case 1. We have a structured type
        if len(dtype_flat) > 1:
            # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
            # First, create the array using a flattened dtype:
            # [('a', int), ('b1', int), ('b2', float)]
            # Then, view the array using the specified dtype.
            if 'O' in (_.char for _ in dtype_flat):
                if has_nested_fields(dtype):
                    raise NotImplementedError(
                        "Nested fields involving objects are not supported...")
                else:
                    output = np.array(data, dtype=dtype)
            else:
                rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
                output = rows.view(dtype)
            # Now, process the rowmasks the same way
            if usemask:
                rowmasks = np.array(
                    masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
                # Construct the new dtype
                mdtype = make_mask_descr(dtype)
                outputmask = rowmasks.view(mdtype)
        # Case #2. We have a basic dtype
        else:
            # We used some user-defined converters
            if user_converters:
                ishomogeneous = True
                descr = []
                for i, ttype in enumerate([conv.type for conv in converters]):
                    # Keep the dtype of the current converter
                    if i in user_converters:
                        ishomogeneous &= (ttype == dtype.type)
                        if np.issubdtype(ttype, np.character):
                            ttype = (ttype, max(len(row[i]) for row in data))
                        descr.append(('', ttype))
                    else:
                        descr.append(('', dtype))
                # So we changed the dtype?
                if not ishomogeneous:
                    # We have more than one field
                    if len(descr) > 1:
                        dtype = np.dtype(descr)
                    # We have only one field: drop the name if not needed.
                    else:
                        dtype = np.dtype(ttype)
            #
            output = np.array(data, dtype)
            if usemask:
                if dtype.names is not None:
                    mdtype = [(_, bool) for _ in dtype.names]
                else:
                    mdtype = bool
                outputmask = np.array(masks, dtype=mdtype)
    # Try to take care of the missing data we missed
    names = output.dtype.names
    if usemask and names:
        for (name, conv) in zip(names, converters):
            missing_values = [conv(_) for _ in conv.missing_values
                              if _ != '']
            for mval in missing_values:
                outputmask[name] |= (output[name] == mval)
    # Construct the final array
    if usemask:
        output = output.view(MaskedArray)
        output._mask = outputmask

    output = _ensure_ndmin_ndarray(output, ndmin=ndmin)

    if unpack:
        if names is None:
            return output.T
        elif len(names) == 1:
            # squeeze single-name dtypes too
            return output[names[0]]
        else:
            # For structured arrays with multiple fields,
            # return an array for each field.
            return [output[field] for field in names]
    return output


_genfromtxt_with_like = array_function_dispatch(
    _genfromtxt_dispatcher, use_like=True
)(genfromtxt)


def recfromtxt(fname, **kwargs):
    """
    Load ASCII data from a file and return it in a record array.

    If ``usemask=False`` a standard `recarray` is returned;
    if ``usemask=True`` a MaskedRecords array is returned.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the
    output array will be determined from the data.
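
    Examples
    --------
    A minimal sketch, with field names taken from the first line of the
    data via ``names=True``:

    >>> from io import StringIO
    >>> f = StringIO('''x y
    ... 1 2.5
    ... 2 3.5''')
    >>> rec = np.recfromtxt(f, names=True)
    >>> rec.x
    array([1, 2])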
  2150. """
  2151. kwargs.setdefault("dtype", None)
  2152. usemask = kwargs.get('usemask', False)
  2153. output = genfromtxt(fname, **kwargs)
  2154. if usemask:
  2155. from numpy.ma.mrecords import MaskedRecords
  2156. output = output.view(MaskedRecords)
  2157. else:
  2158. output = output.view(np.recarray)
  2159. return output


def recfromcsv(fname, **kwargs):
    """
    Load ASCII data stored in a comma-separated file.

    The returned array is a record array (if ``usemask=False``, see
    `recarray`) or a masked record array (if ``usemask=True``,
    see `ma.mrecords.MaskedRecords`).

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the
    output array will be determined from the data.
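
    Examples
    --------
    A minimal sketch; note that the header names are lower-cased by the
    ``case_sensitive="lower"`` default that this function sets:

    >>> from io import StringIO
    >>> f = StringIO('''A,B
    ... 1,2.5
    ... 2,3.5''')
    >>> rec = np.recfromcsv(f)
    >>> rec.a
    array([1, 2])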
  2176. """
  2177. # Set default kwargs for genfromtxt as relevant to csv import.
  2178. kwargs.setdefault("case_sensitive", "lower")
  2179. kwargs.setdefault("names", True)
  2180. kwargs.setdefault("delimiter", ",")
  2181. kwargs.setdefault("dtype", None)
  2182. output = genfromtxt(fname, **kwargs)
  2183. usemask = kwargs.get("usemask", False)
  2184. if usemask:
  2185. from numpy.ma.mrecords import MaskedRecords
  2186. output = output.view(MaskedRecords)
  2187. else:
  2188. output = output.view(np.recarray)
  2189. return output