m2m model translation

import sys
import os
import re
import functools
import itertools
import warnings
import weakref
import contextlib
from operator import itemgetter, index as opindex
from collections.abc import Mapping

import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core import overrides
from numpy.core.multiarray import packbits, unpackbits
from numpy.core.overrides import set_array_function_like_doc, set_module
from numpy.core._internal import recursive
from ._iotools import (
    LineSplitter, NameValidator, StringConverter, ConverterError,
    ConverterLockError, ConversionWarning, _is_string_like,
    has_nested_fields, flatten_dtype, easy_dtype, _decode_line
    )

from numpy.compat import (
    asbytes, asstr, asunicode, os_fspath, os_PathLike,
    pickle
    )


@set_module('numpy')
def loads(*args, **kwargs):
    # NumPy 1.15.0, 2017-12-10
    warnings.warn(
        "np.loads is deprecated, use pickle.loads instead",
        DeprecationWarning, stacklevel=2)
    return pickle.loads(*args, **kwargs)


__all__ = [
    'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
    'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
    'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
    ]


array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')


class BagObj:
    """
    BagObj(obj)

    Convert attribute look-ups to getitems on the object passed in.

    Parameters
    ----------
    obj : class instance
        Object on which attribute look-up is performed.

    Examples
    --------
    >>> from numpy.lib.npyio import BagObj as BO
    >>> class BagDemo:
    ...     def __getitem__(self, key):  # An instance of BagObj(BagDemo)
    ...                                  # will call this method when any
    ...                                  # attribute look-up is required
    ...         result = "Doesn't matter what you want, "
    ...         return result + "you're gonna get this"
    ...
    >>> demo_obj = BagDemo()
    >>> bagobj = BO(demo_obj)
    >>> bagobj.hello_there
    "Doesn't matter what you want, you're gonna get this"
    >>> bagobj.I_can_be_anything
    "Doesn't matter what you want, you're gonna get this"

    """

    def __init__(self, obj):
        # Use weakref to make NpzFile objects collectable by refcount
        self._obj = weakref.proxy(obj)

    def __getattribute__(self, key):
        try:
            return object.__getattribute__(self, '_obj')[key]
        except KeyError:
            raise AttributeError(key) from None

    def __dir__(self):
        """
        Enables dir(bagobj) to list the files in an NpzFile.

        This also enables tab-completion in an interpreter or IPython.
        """
        return list(object.__getattribute__(self, '_obj').keys())


def zipfile_factory(file, *args, **kwargs):
    """
    Create a ZipFile.

    Allows for Zip64, and the `file` argument can accept file, str, or
    pathlib.Path objects. `args` and `kwargs` are passed to the
    zipfile.ZipFile constructor.
    """
    if not hasattr(file, 'read'):
        file = os_fspath(file)
    import zipfile
    kwargs['allowZip64'] = True
    return zipfile.ZipFile(file, *args, **kwargs)
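

# Illustrative usage sketch (a demo helper, not part of this module's API):
# ``zipfile_factory`` accepts path-like or file-like objects and always
# enables Zip64, so archives larger than 2 GiB can be written. The file name
# below is a hypothetical example.
def _demo_zipfile_factory():
    import pathlib
    import tempfile
    tmp = pathlib.Path(tempfile.mkdtemp()) / "demo.zip"
    with zipfile_factory(tmp, mode="w") as zf:
        zf.writestr("hello.txt", "hi")    # an ordinary member, not .npy
    with zipfile_factory(tmp) as zf:      # read the archive back
        assert zf.namelist() == ["hello.txt"]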


class NpzFile(Mapping):
    """
    NpzFile(fid)

    A dictionary-like object with lazy-loading of files in the zipped
    archive provided on construction.

    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension;
    other files are ignored.

    The arrays and file strings are lazily loaded on either
    getitem access using ``obj['key']`` or attribute lookup using
    ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
    be obtained with ``obj.files`` and the ZipFile object itself using
    ``obj.zip``.

    Attributes
    ----------
    files : list of str
        List of all files in the archive with a ``.npy`` extension.
    zip : ZipFile instance
        The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        An object on which attribute access can be performed as an
        alternative to getitem access on the `NpzFile` instance itself.
    allow_pickle : bool, optional
        Allow loading pickled data. Default: False

        .. versionchanged:: 1.16.3
            Made default False in response to CVE-2019-6446.

    pickle_kwargs : dict, optional
        Additional keyword arguments to pass on to pickle.load.
        These are only useful when loading object arrays saved on
        Python 2 when using Python 3.

    Parameters
    ----------
    fid : file or str
        The zipped archive to open. This is either a file-like object
        or a string containing the path to the archive.
    own_fid : bool, optional
        Whether NpzFile should close the file handle.
        Requires that `fid` is a file-like object.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez(outfile, x=x, y=y)
    >>> _ = outfile.seek(0)

    >>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.npyio.NpzFile)
    True
    >>> sorted(npz.files)
    ['x', 'y']
    >>> npz['x']  # getitem access
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> npz.f.x  # attribute lookup
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # Make __exit__ safe if zipfile_factory raises an exception
    zip = None
    fid = None

    def __init__(self, fid, own_fid=False, allow_pickle=False,
                 pickle_kwargs=None):
        # Import is postponed to here since zipfile depends on gzip, an
        # optional component of the so-called standard library.
        _zip = zipfile_factory(fid)
        self._files = _zip.namelist()
        self.files = []
        self.allow_pickle = allow_pickle
        self.pickle_kwargs = pickle_kwargs
        for x in self._files:
            if x.endswith('.npy'):
                self.files.append(x[:-4])
            else:
                self.files.append(x)
        self.zip = _zip
        self.f = BagObj(self)
        if own_fid:
            self.fid = fid

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """
        Close the file.
        """
        if self.zip is not None:
            self.zip.close()
            self.zip = None
        if self.fid is not None:
            self.fid.close()
            self.fid = None
        self.f = None  # break reference cycle

    def __del__(self):
        self.close()

    # Implement the Mapping ABC
    def __iter__(self):
        return iter(self.files)

    def __len__(self):
        return len(self.files)

    def __getitem__(self, key):
        # FIXME: This seems like it will copy strings around
        #   more than is strictly necessary. The zipfile
        #   will read the string and then
        #   the format.read_array will copy the string
        #   to another place in memory.
        #   It would be better if the zipfile could read
        #   (or at least uncompress) the data
        #   directly into the array memory.
        member = False
        if key in self._files:
            member = True
        elif key in self.files:
            member = True
            key += '.npy'
        if member:
            bytes = self.zip.open(key)
            magic = bytes.read(len(format.MAGIC_PREFIX))
            bytes.close()
            if magic == format.MAGIC_PREFIX:
                bytes = self.zip.open(key)
                return format.read_array(bytes,
                                         allow_pickle=self.allow_pickle,
                                         pickle_kwargs=self.pickle_kwargs)
            else:
                return self.zip.read(key)
        else:
            raise KeyError("%s is not a file in the archive" % key)

    # Deprecate the Python 2 dict APIs that we supported by accident in
    # Python 3. We forgot to implement itervalues() at all in earlier
    # versions of numpy, so no need to deprecate it here.
    def iteritems(self):
        # Numpy 1.15, 2018-02-20
        warnings.warn(
            "NpzFile.iteritems is deprecated in python 3, to match the "
            "removal of dict.iteritems. Use .items() instead.",
            DeprecationWarning, stacklevel=2)
        return self.items()

    def iterkeys(self):
        # Numpy 1.15, 2018-02-20
        warnings.warn(
            "NpzFile.iterkeys is deprecated in python 3, to match the "
            "removal of dict.iterkeys. Use .keys() instead.",
            DeprecationWarning, stacklevel=2)
        return self.keys()
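

# Illustrative usage sketch (a demo helper, not part of this module's API):
# NpzFile implements the Mapping ABC, so dict-style iteration works, and it
# is a context manager, so the underlying handles are released
# deterministically. All names below are local to the demo.
def _demo_npzfile_mapping():
    import io
    buf = io.BytesIO()
    np.savez(buf, x=np.arange(3), y=np.ones(2))
    buf.seek(0)
    with np.load(buf) as npz:                       # np.load returns an NpzFile here
        loaded = {name: npz[name] for name in npz}  # Mapping-style iteration
    assert sorted(loaded) == ['x', 'y']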


@set_module('numpy')
def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
         encoding='ASCII'):
    """
    Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.

    .. warning:: Loading files that contain object arrays uses the ``pickle``
                 module, which is not secure against erroneous or maliciously
                 constructed data. Consider passing ``allow_pickle=False`` to
                 load data that is known not to contain object arrays for the
                 safer handling of untrusted sources.

    Parameters
    ----------
    file : file-like object, string, or pathlib.Path
        The file to read. File-like objects must support the
        ``seek()`` and ``read()`` methods. Pickled files require that the
        file-like object support the ``readline()`` method as well.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, then memory-map the file, using the given mode (see
        `numpy.memmap` for a detailed description of the modes). A
        memory-mapped array is kept on disk. However, it can be accessed
        and sliced like any ndarray. Memory mapping is especially useful
        for accessing small fragments of large files without reading the
        entire file into memory.
    allow_pickle : bool, optional
        Allow loading pickled object arrays stored in npy files. Reasons for
        disallowing pickles include security, as loading pickled data can
        execute arbitrary code. If pickles are disallowed, loading object
        arrays will fail. Default: False

        .. versionchanged:: 1.16.3
            Made default False in response to CVE-2019-6446.

    fix_imports : bool, optional
        Only useful when loading Python 2 generated pickled files on Python 3,
        which includes npy/npz files containing object arrays. If `fix_imports`
        is True, pickle will try to map the old Python 2 names to the new names
        used in Python 3.
    encoding : str, optional
        What encoding to use when reading Python 2 strings. Only useful when
        loading Python 2 generated pickled files in Python 3, which includes
        npy/npz files containing object arrays. Values other than 'latin1',
        'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
        data. Default: 'ASCII'

    Returns
    -------
    result : array, tuple, dict, etc.
        Data stored in the file. For ``.npz`` files, the returned instance
        of NpzFile class must be closed to avoid leaking file descriptors.

    Raises
    ------
    IOError
        If the input file does not exist or cannot be read.
    ValueError
        The file contains an object array, but ``allow_pickle=False`` was
        given.

    See Also
    --------
    save, savez, savez_compressed, loadtxt
    memmap : Create a memory-map to an array stored in a file on disk.
    lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.

    Notes
    -----
    - If the file contains pickle data, then whatever object is stored
      in the pickle is returned.
    - If the file is a ``.npy`` file, then a single array is returned.
    - If the file is a ``.npz`` file, then a dictionary-like object is
      returned, containing ``{filename: array}`` key-value pairs, one for
      each file in the archive.
    - If the file is a ``.npz`` file, the returned value supports the
      context manager protocol in a similar fashion to the open function::

        with load('foo.npz') as data:
            a = data['a']

      The underlying file descriptor is closed when exiting the 'with'
      block.

    Examples
    --------
    Store data to disk, and load it again:

    >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
    >>> np.load('/tmp/123.npy')
    array([[1, 2, 3],
           [4, 5, 6]])

    Store compressed data to disk, and load it again:

    >>> a=np.array([[1, 2, 3], [4, 5, 6]])
    >>> b=np.array([1, 2])
    >>> np.savez('/tmp/123.npz', a=a, b=b)
    >>> data = np.load('/tmp/123.npz')
    >>> data['a']
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> data['b']
    array([1, 2])
    >>> data.close()

    Mem-map the stored array, and then access the second row
    directly from disk:

    >>> X = np.load('/tmp/123.npy', mmap_mode='r')
    >>> X[1, :]
    memmap([4, 5, 6])

    """
    if encoding not in ('ASCII', 'latin1', 'bytes'):
        # The 'encoding' value for pickle also affects what encoding
        # the serialized binary data of NumPy arrays is loaded
        # in. Pickle does not pass on the encoding information to
        # NumPy. The unpickling code in numpy.core.multiarray is
        # written to assume that unicode data appearing where binary
        # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
        #
        # Other encoding values can corrupt binary data, and we
        # purposefully disallow them. For the same reason, the errors=
        # argument is not exposed, as values other than 'strict'
        # can similarly silently corrupt numerical data.
        raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")

    pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)

    with contextlib.ExitStack() as stack:
        if hasattr(file, 'read'):
            fid = file
            own_fid = False
        else:
            fid = stack.enter_context(open(os_fspath(file), "rb"))
            own_fid = True

        # Code to distinguish NumPy binary files from pickles.
        _ZIP_PREFIX = b'PK\x03\x04'
        _ZIP_SUFFIX = b'PK\x05\x06'  # empty zip files start with this
        N = len(format.MAGIC_PREFIX)
        magic = fid.read(N)
        # If the file size is less than N, we need to make sure not
        # to seek past the beginning of the file
        fid.seek(-min(N, len(magic)), 1)  # back-up
        if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
            # zip-file (assume .npz)
            # Potentially transfer file ownership to NpzFile
            stack.pop_all()
            ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
                          pickle_kwargs=pickle_kwargs)
            return ret
        elif magic == format.MAGIC_PREFIX:
            # .npy file
            if mmap_mode:
                return format.open_memmap(file, mode=mmap_mode)
            else:
                return format.read_array(fid, allow_pickle=allow_pickle,
                                         pickle_kwargs=pickle_kwargs)
        else:
            # Try a pickle
            if not allow_pickle:
                raise ValueError("Cannot load file containing pickled data "
                                 "when allow_pickle=False")
            try:
                return pickle.load(fid, **pickle_kwargs)
            except Exception as e:
                raise IOError(
                    "Failed to interpret file %s as a pickle" % repr(file)) from e


def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
    return (arr,)


@array_function_dispatch(_save_dispatcher)
def save(file, arr, allow_pickle=True, fix_imports=True):
    """
    Save an array to a binary file in NumPy ``.npy`` format.

    Parameters
    ----------
    file : file, str, or pathlib.Path
        File or filename to which the data is saved. If file is a file-object,
        then the filename is unchanged. If file is a string or Path, a ``.npy``
        extension will be appended to the filename if it does not already
        have one.
    arr : array_like
        Array data to be saved.
    allow_pickle : bool, optional
        Allow saving object arrays using Python pickles. Reasons for
        disallowing pickles include security (loading pickled data can execute
        arbitrary code) and portability (pickled objects may not be loadable
        on different Python installations, for example if the stored objects
        require libraries that are not available, and not all pickled data is
        compatible between Python 2 and Python 3).
        Default: True
    fix_imports : bool, optional
        Only useful in forcing objects in object arrays on Python 3 to be
        pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
        will try to map the new Python 3 names to the old module names used in
        Python 2, so that the pickle data stream is readable with Python 2.

    See Also
    --------
    savez : Save several arrays into a ``.npz`` archive
    savetxt, load

    Notes
    -----
    For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.

    Any data saved to the file is appended to the end of the file.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()

    >>> x = np.arange(10)
    >>> np.save(outfile, x)

    >>> _ = outfile.seek(0)  # Only needed here to simulate closing & reopening file
    >>> np.load(outfile)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])


    >>> with open('test.npy', 'wb') as f:
    ...     np.save(f, np.array([1, 2]))
    ...     np.save(f, np.array([1, 3]))
    >>> with open('test.npy', 'rb') as f:
    ...     a = np.load(f)
    ...     b = np.load(f)
    >>> print(a, b)
    # [1 2] [1 3]
    """
    if hasattr(file, 'write'):
        file_ctx = contextlib.nullcontext(file)
    else:
        file = os_fspath(file)
        if not file.endswith('.npy'):
            file = file + '.npy'
        file_ctx = open(file, "wb")

    with file_ctx as fid:
        arr = np.asanyarray(arr)
        format.write_array(fid, arr, allow_pickle=allow_pickle,
                           pickle_kwargs=dict(fix_imports=fix_imports))
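

# Illustrative sketch (a demo helper, not part of this module's API): because
# ``save`` writes at the current position of a file-like object, several
# arrays can be appended back to back and read again in the same order, as in
# the docstring example above but fully in memory.
def _demo_save_append():
    import io
    buf = io.BytesIO()
    np.save(buf, np.array([1, 2]))
    np.save(buf, np.array([3, 4, 5]))
    buf.seek(0)
    a, b = np.load(buf), np.load(buf)
    assert a.tolist() == [1, 2] and b.tolist() == [3, 4, 5]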


def _savez_dispatcher(file, *args, **kwds):
    yield from args
    yield from kwds.values()


@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, **kwds):
    """Save several arrays into a single file in uncompressed ``.npz`` format.

    Provide arrays as keyword arguments to store them under the
    corresponding name in the output file: ``savez(fn, x=x, y=y)``.

    If arrays are specified as positional arguments, i.e., ``savez(fn,
    x, y)``, their names will be `arr_0`, `arr_1`, etc.

    Parameters
    ----------
    file : str or file
        Either the filename (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the filename if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Please use keyword arguments (see
        `kwds` below) to assign names to arrays. Arrays specified as
        args will be named "arr_0", "arr_1", and so on.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Each array will be saved to the
        output file with its corresponding keyword name.

    Returns
    -------
    None

    See Also
    --------
    save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    savez_compressed : Save several arrays into a compressed ``.npz`` archive

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain. The archive is not compressed and each file
    in the archive contains one variable in ``.npy`` format. For a
    description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    When saving dictionaries, the dictionary keys become filenames
    inside the ZIP archive. Therefore, keys should be valid filenames.
    E.g., avoid keys that begin with ``/`` or contain ``.``.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)

    Using `savez` with \\*args, the arrays are saved with default names.

    >>> np.savez(outfile, x, y)
    >>> _ = outfile.seek(0)  # Only needed here to simulate closing & reopening file
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['arr_0', 'arr_1']
    >>> npzfile['arr_0']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    Using `savez` with \\**kwds, the arrays are saved with the keyword names.

    >>> outfile = TemporaryFile()
    >>> np.savez(outfile, x=x, y=y)
    >>> _ = outfile.seek(0)
    >>> npzfile = np.load(outfile)
    >>> sorted(npzfile.files)
    ['x', 'y']
    >>> npzfile['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    _savez(file, args, kwds, False)


def _savez_compressed_dispatcher(file, *args, **kwds):
    yield from args
    yield from kwds.values()


@array_function_dispatch(_savez_compressed_dispatcher)
def savez_compressed(file, *args, **kwds):
    """
    Save several arrays into a single file in compressed ``.npz`` format.

    Provide arrays as keyword arguments to store them under the
    corresponding name in the output file: ``savez_compressed(fn, x=x, y=y)``.

    If arrays are specified as positional arguments, i.e.,
    ``savez_compressed(fn, x, y)``, their names will be `arr_0`, `arr_1`,
    etc.

    Parameters
    ----------
    file : str or file
        Either the filename (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the filename if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Please use keyword arguments (see
        `kwds` below) to assign names to arrays. Arrays specified as
        args will be named "arr_0", "arr_1", and so on.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Each array will be saved to the
        output file with its corresponding keyword name.

    Returns
    -------
    None

    See Also
    --------
    numpy.save : Save a single array to a binary file in NumPy format.
    numpy.savetxt : Save an array to a file as plain text.
    numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
    numpy.load : Load the files created by savez_compressed.

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain. The archive is compressed with
    ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one
    variable in ``.npy`` format. For a description of the ``.npy`` format,
    see :py:mod:`numpy.lib.format`.

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Examples
    --------
    >>> test_array = np.random.rand(3, 2)
    >>> test_vector = np.random.rand(4)
    >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
    >>> loaded = np.load('/tmp/123.npz')
    >>> print(np.array_equal(test_array, loaded['a']))
    True
    >>> print(np.array_equal(test_vector, loaded['b']))
    True

    """
    _savez(file, args, kwds, True)


def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile

    if not hasattr(file, 'write'):
        file = os_fspath(file)
        if not file.endswith('.npz'):
            file = file + '.npz'

    namedict = kwds
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        if key in namedict.keys():
            raise ValueError(
                "Cannot use un-named variables and keyword %s" % key)
        namedict[key] = val

    if compress:
        compression = zipfile.ZIP_DEFLATED
    else:
        compression = zipfile.ZIP_STORED

    zipf = zipfile_factory(file, mode="w", compression=compression)

    for key, val in namedict.items():
        fname = key + '.npy'
        val = np.asanyarray(val)
        # always force zip64, gh-10776
        with zipf.open(fname, 'w', force_zip64=True) as fid:
            format.write_array(fid, val,
                               allow_pickle=allow_pickle,
                               pickle_kwargs=pickle_kwargs)

    zipf.close()
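

# Illustrative sketch (a demo helper, not part of this module's API):
# ``_savez`` stores each array as ``<name>.npy`` inside an ordinary zip
# archive, which is why ``.npz`` members can be listed with the standard
# zipfile module.
def _demo_npz_is_a_zip():
    import io
    import zipfile
    buf = io.BytesIO()
    np.savez(buf, first=np.zeros(2), second=np.ones(3))
    buf.seek(0)
    names = zipfile.ZipFile(buf).namelist()
    assert sorted(names) == ['first.npy', 'second.npy']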


def _getconv(dtype):
    """ Find the correct dtype converter. Adapted from matplotlib """

    def floatconv(x):
        x = x.lower()  # was a no-op ``x.lower()``; lowercase so '0X' also matches
        if '0x' in x:
            return float.fromhex(x)
        return float(x)

    typ = dtype.type
    if issubclass(typ, np.bool_):
        return lambda x: bool(int(x))
    if issubclass(typ, np.uint64):
        return np.uint64
    if issubclass(typ, np.int64):
        return np.int64
    if issubclass(typ, np.integer):
        return lambda x: int(float(x))
    elif issubclass(typ, np.longdouble):
        return np.longdouble
    elif issubclass(typ, np.floating):
        return floatconv
    elif issubclass(typ, complex):
        return lambda x: complex(asstr(x).replace('+-', '-'))
    elif issubclass(typ, np.bytes_):
        return asbytes
    elif issubclass(typ, np.unicode_):
        return asunicode
    else:
        return asstr
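

# Illustrative sketch (a demo helper, not part of this module's API):
# ``_getconv`` maps a dtype to a text parser; note that the float path also
# accepts ``float.hex`` strings, per ``floatconv`` above.
def _demo_getconv():
    conv = _getconv(np.dtype(float))
    assert conv('0x1.8p1') == 3.0                  # hex float via floatconv
    assert _getconv(np.dtype(bool))('0') is False  # bool parses through int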


# amount of lines loadtxt reads in one chunk, can be overridden for testing
_loadtxt_chunksize = 50000


def _loadtxt_dispatcher(fname, dtype=None, comments=None, delimiter=None,
                        converters=None, skiprows=None, usecols=None,
                        unpack=None, ndmin=None, encoding=None,
                        max_rows=None, *, like=None):
    return (like,)


@set_array_function_like_doc
@set_module('numpy')
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
            converters=None, skiprows=0, usecols=None, unpack=False,
            ndmin=0, encoding='bytes', max_rows=None, *, like=None):
    r"""
    Load data from a text file.

    Each row in the text file must have the same number of values.

    Parameters
    ----------
    fname : file, str, or pathlib.Path
        File, filename, or generator to read. If the filename extension is
        ``.gz`` or ``.bz2``, the file is first decompressed. Note that
        generators should return byte strings.
    dtype : data-type, optional
        Data-type of the resulting array; default: float. If this is a
        structured data-type, the resulting array will be 1-dimensional, and
        each row will be interpreted as an element of the array. In this
        case, the number of columns used must match the number of fields in
        the data-type.
    comments : str or sequence of str, optional
        The characters or list of characters used to indicate the start of a
        comment. None implies no comments. For backwards compatibility, byte
        strings will be decoded as 'latin1'. The default is '#'.
    delimiter : str, optional
        The string used to separate values. For backwards compatibility, byte
        strings will be decoded as 'latin1'. The default is whitespace.
    converters : dict, optional
        A dictionary mapping column number to a function that will parse the
        column string into the desired value. E.g., if column 0 is a date
        string: ``converters = {0: datestr2num}``. Converters can also be
        used to provide a default value for missing data (but see also
        `genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``.
        Default: None.
    skiprows : int, optional
        Skip the first `skiprows` lines, including comments; default: 0.
    usecols : int or sequence, optional
        Which columns to read, with 0 being the first. For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.

        .. versionchanged:: 1.11.0
            When a single column has to be read it is possible to use
            an integer instead of a tuple. E.g., ``usecols = 3`` reads the
            fourth column the same way as ``usecols = (3,)`` would.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``. When used with a
        structured data-type, arrays are returned for each field.
        Default is False.
    ndmin : int, optional
        The returned array will have at least `ndmin` dimensions.
        Otherwise mono-dimensional axes will be squeezed.
        Legal values: 0 (default), 1 or 2.

        .. versionadded:: 1.6.0
    encoding : str, optional
        Encoding used to decode the inputfile. Does not apply to input
        streams. The special value 'bytes' enables backward compatibility
        workarounds that ensure you receive byte arrays as results if
        possible and passes 'latin1' encoded strings to converters. Override
        this value to receive unicode arrays and pass strings as input to
        converters. If set to None the system default is used. The default
        value is 'bytes'.

        .. versionadded:: 1.14.0
    max_rows : int, optional
        Read `max_rows` lines of content after `skiprows` lines. The default
        is to read all the lines.

        .. versionadded:: 1.16.0
    ${ARRAY_FUNCTION_LIKE}

        .. versionadded:: 1.20.0

    Returns
    -------
    out : ndarray
        Data read from the text file.

    See Also
    --------
    load, fromstring, fromregex
    genfromtxt : Load data with missing values handled as specified.
    scipy.io.loadmat : reads MATLAB data files

    Notes
    -----
    This function aims to be a fast reader for simply formatted files. The
    `genfromtxt` function provides more sophisticated handling of, e.g.,
    lines with missing values.

    .. versionadded:: 1.10.0

    The strings produced by the Python float.hex method can be used as
    input for floats.

    Examples
    --------
    >>> from io import StringIO   # StringIO behaves like a file object
    >>> c = StringIO("0 1\n2 3")
    >>> np.loadtxt(c)
    array([[0., 1.],
           [2., 3.]])

    >>> d = StringIO("M 21 72\nF 35 58")
    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
    ...                      'formats': ('S1', 'i4', 'f4')})
    array([(b'M', 21, 72.), (b'F', 35, 58.)],
          dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])

    >>> c = StringIO("1,0,2\n3,0,4")
    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
    >>> x
    array([1., 3.])
    >>> y
    array([2., 4.])

    This example shows how `converters` can be used to convert a field
    with a trailing minus sign into a negative number.

    >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
    >>> def conv(fld):
    ...     return -float(fld[:-1]) if fld.endswith(b'-') else float(fld)
    ...
    >>> np.loadtxt(s, converters={0: conv, 1: conv})
    array([[ 10.01, -31.25],
           [ 19.22,  64.31],
           [-17.57,  63.94]])
    """

    if like is not None:
        return _loadtxt_with_like(
            fname, dtype=dtype, comments=comments, delimiter=delimiter,
            converters=converters, skiprows=skiprows, usecols=usecols,
            unpack=unpack, ndmin=ndmin, encoding=encoding,
            max_rows=max_rows, like=like
        )

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Nested functions used by loadtxt.
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    # not to be confused with the flatten_dtype we import...
    @recursive
    def flatten_dtype_internal(self, dt):
        """Unpack a structured data-type, and produce re-packing info."""
        if dt.names is None:
            # If the dtype is flattened, return.
            # If the dtype has a shape, the dtype occurs
            # in the list more than once.
            shape = dt.shape
            if len(shape) == 0:
                return ([dt.base], None)
            else:
                packing = [(shape[-1], list)]
                if len(shape) > 1:
                    for dim in dt.shape[-2::-1]:
                        packing = [(dim*packing[0][0], packing*dim)]
                return ([dt.base] * int(np.prod(dt.shape)), packing)
        else:
            types = []
            packing = []
            for field in dt.names:
                tp, bytes = dt.fields[field]
                flat_dt, flat_packing = self(tp)
                types.extend(flat_dt)
                # Avoid extra nesting for subarrays
                if tp.ndim > 0:
                    packing.extend(flat_packing)
                else:
                    packing.append((len(flat_dt), flat_packing))
            return (types, packing)

    @recursive
    def pack_items(self, items, packing):
        """Pack items into nested lists based on re-packing info."""
        if packing is None:
            return items[0]
        elif packing is tuple:
            return tuple(items)
        elif packing is list:
            return list(items)
        else:
            start = 0
            ret = []
            for length, subpacking in packing:
                ret.append(self(items[start:start+length], subpacking))
                start += length
            return tuple(ret)

    def split_line(line):
        """Chop off comments, strip, and split at delimiter. """
        line = _decode_line(line, encoding=encoding)

        if comments is not None:
            line = regex_comments.split(line, maxsplit=1)[0]
        line = line.strip('\r\n')
        return line.split(delimiter) if line else []

    def read_data(chunk_size):
        """Parse each line, including the first.

        The file read, `fh`, is defined in the enclosing scope above.

        Parameters
        ----------
        chunk_size : int
            At most `chunk_size` lines are read at a time, with iteration
            until all lines are read.

        """
        X = []
        line_iter = itertools.chain([first_line], fh)
        line_iter = itertools.islice(line_iter, max_rows)
        for i, line in enumerate(line_iter):
            vals = split_line(line)
            if len(vals) == 0:
                continue
            if usecols:
                vals = [vals[j] for j in usecols]
            if len(vals) != N:
                line_num = i + skiprows + 1
                raise ValueError("Wrong number of columns at line %d"
                                 % line_num)

            # Convert each value according to its column and store
            items = [conv(val) for (conv, val) in zip(converters, vals)]

            # Then pack it according to the dtype's nesting
            items = pack_items(items, packing)
            X.append(items)
            if len(X) > chunk_size:
                yield X
                X = []
        if X:
            yield X

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Main body of loadtxt.
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    # Check correctness of the values of `ndmin`
    if ndmin not in [0, 1, 2]:
        raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)

    # Type conversions for Py3 convenience
    if comments is not None:
        if isinstance(comments, (str, bytes)):
            comments = [comments]
        comments = [_decode_line(x) for x in comments]
        # Compile regex for comments beforehand
        comments = (re.escape(comment) for comment in comments)
        regex_comments = re.compile('|'.join(comments))

    if delimiter is not None:
        delimiter = _decode_line(delimiter)

    user_converters = converters

    byte_converters = False
    if encoding == 'bytes':
        encoding = None
        byte_converters = True

    if usecols is not None:
        # Allow usecols to be a single int or a sequence of ints
        try:
            usecols_as_list = list(usecols)
        except TypeError:
            usecols_as_list = [usecols]
        for col_idx in usecols_as_list:
            try:
                opindex(col_idx)
            except TypeError as e:
                e.args = (
                    "usecols must be an int or a sequence of ints but "
                    "it contains at least one element of type %s" %
                    type(col_idx),
                    )
                raise
        # Fall back to existing code
        usecols = usecols_as_list

    # Make sure we're dealing with a proper dtype
    dtype = np.dtype(dtype)
    defconv = _getconv(dtype)

    dtype_types, packing = flatten_dtype_internal(dtype)

    fown = False
    try:
        if isinstance(fname, os_PathLike):
            fname = os_fspath(fname)
        if _is_string_like(fname):
            fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
            fencoding = getattr(fh, 'encoding', 'latin1')
            fh = iter(fh)
            fown = True
        else:
            fh = iter(fname)
            fencoding = getattr(fname, 'encoding', 'latin1')
    except TypeError as e:
        raise ValueError(
            'fname must be a string, file handle, or generator'
        ) from e

    # input may be a python2 io stream
    if encoding is not None:
        fencoding = encoding
    # we must assume local encoding
    # TODO emit portability warning?
    elif fencoding is None:
        import locale
        fencoding = locale.getpreferredencoding()

    try:
        # Skip the first `skiprows` lines
        for i in range(skiprows):
            next(fh)

        # Read until we find a line with some values, and use
        # it to estimate the number of columns, N.
        first_vals = None
        try:
            while not first_vals:
                first_line = next(fh)
                first_vals = split_line(first_line)
        except StopIteration:
            # End of lines reached
            first_line = ''
            first_vals = []
            warnings.warn('loadtxt: Empty input file: "%s"' % fname,
                          stacklevel=2)
        N = len(usecols or first_vals)

        # Now that we know N, create the default converters list, and
        # set packing, if necessary.
        if len(dtype_types) > 1:
            # We're dealing with a structured array, each field of
            # the dtype matches a column
            converters = [_getconv(dt) for dt in dtype_types]
        else:
            # All fields have the same dtype
            converters = [defconv for i in range(N)]
            if N > 1:
                packing = [(N, tuple)]

        # By preference, use the converters specified by the user
        for i, conv in (user_converters or {}).items():
            if usecols:
                try:
                    i = usecols.index(i)
                except ValueError:
                    # Unused converter specified
                    continue
            if byte_converters:
                # converters may use decode to workaround numpy's old
                # behaviour, so encode the string again before passing
                # to the user converter
                def tobytes_first(x, conv):
                    if type(x) is bytes:
                        return conv(x)
                    return conv(x.encode("latin1"))
                converters[i] = functools.partial(tobytes_first, conv=conv)
            else:
                converters[i] = conv

        converters = [conv if conv is not bytes else
                      lambda x: x.encode(fencoding) for conv in converters]

        # read data in chunks and fill it into an array via resize;
        # over-allocating and shrinking the array later may be faster but is
        # probably not relevant compared to the cost of actually reading and
        # converting the data
        X = None
        for x in read_data(_loadtxt_chunksize):
            if X is None:
                X = np.array(x, dtype)
            else:
                nshape = list(X.shape)
                pos = nshape[0]
                nshape[0] += len(x)
                X.resize(nshape, refcheck=False)
                X[pos:, ...] = x
    finally:
        if fown:
            fh.close()

    if X is None:
        X = np.array([], dtype)

    # Multicolumn data are returned with shape (1, N, M), i.e.
    # (1, 1, M) for a single row - remove the singleton dimension there
    if X.ndim == 3 and X.shape[:2] == (1, 1):
        X.shape = (1, -1)

    # Verify that the array has at least dimensions `ndmin`.
    # Tweak the size and shape of the arrays - remove extraneous dimensions
    if X.ndim > ndmin:
        X = np.squeeze(X)
    # and ensure we have the minimum number of dimensions asked for
    # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
    if X.ndim < ndmin:
        if ndmin == 1:
            X = np.atleast_1d(X)
        elif ndmin == 2:
            X = np.atleast_2d(X).T

    if unpack:
        if len(dtype_types) > 1:
            # For structured arrays, return an array for each field.
            return [X[field] for field in dtype.names]
        else:
            return X.T
    else:
        return X


_loadtxt_with_like = array_function_dispatch(
    _loadtxt_dispatcher
)(loadtxt)
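

# Illustrative sketch (a demo helper, not part of this module's API):
# ``loadtxt`` accepts any iterable of lines, so a generator can feed it
# synthetic rows, and ``usecols``/``unpack`` select and transpose columns in
# one call.
def _demo_loadtxt_generator():
    rows = ("%d %d %d" % (i, 2 * i, 3 * i) for i in range(4))
    first, third = np.loadtxt(rows, usecols=(0, 2), unpack=True)
    assert third.tolist() == [0.0, 3.0, 6.0, 9.0]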


def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
                        header=None, footer=None, comments=None,
                        encoding=None):
    return (X,)


@array_function_dispatch(_savetxt_dispatcher)
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
            footer='', comments='# ', encoding=None):
    """
    Save an array to a text file.

    Parameters
    ----------
    fname : filename or file handle
        If the filename ends in ``.gz``, the file is automatically saved in
        compressed gzip format. `loadtxt` understands gzipped files
        transparently.
    X : 1D or 2D array_like
        Data to be saved to a text file.
    fmt : str or sequence of strs, optional
        A single format (%10.5f), a sequence of formats, or a
        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
        case `delimiter` is ignored. For complex `X`, the legal options
        for `fmt` are:

        * a single specifier, `fmt='%.4e'`, resulting in numbers formatted
          like `' (%s+%sj)' % (fmt, fmt)`
        * a full string specifying every real and imaginary part, e.g.
          `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
        * a list of specifiers, one per column - in this case, the real
          and imaginary part must have separate specifiers,
          e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
    delimiter : str, optional
        String or character separating columns.
    newline : str, optional
        String or character separating lines.

        .. versionadded:: 1.5.0
    header : str, optional
        String that will be written at the beginning of the file.

        .. versionadded:: 1.7.0
    footer : str, optional
        String that will be written at the end of the file.

        .. versionadded:: 1.7.0
    comments : str, optional
        String that will be prepended to the ``header`` and ``footer``
        strings, to mark them as comments. Default: '# ', as expected by
        e.g. ``numpy.loadtxt``.

        .. versionadded:: 1.7.0
    encoding : {None, str}, optional
        Encoding used to encode the outputfile. Does not apply to output
        streams. If the encoding is something other than 'bytes' or 'latin1'
        you will not be able to load the file in NumPy versions < 1.14.
        Default is 'latin1'.

        .. versionadded:: 1.14.0

    See Also
    --------
    save : Save an array to a binary file in NumPy ``.npy`` format
    savez : Save several arrays into an uncompressed ``.npz`` archive
    savez_compressed : Save several arrays into a compressed ``.npz`` archive

    Notes
    -----
    Further explanation of the `fmt` parameter
    (``%[flag]width[.precision]specifier``):

    flags:
        ``-`` : left justify

        ``+`` : Forces to precede result with + or -.

        ``0`` : Left pad the number with zeros instead of space (see width).

    width:
        Minimum number of characters to be printed. The value is not
        truncated if it has more characters.

    precision:
        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
          digits.
        - For ``e, E`` and ``f`` specifiers, the number of digits to print
          after the decimal point.
        - For ``g`` and ``G``, the maximum number of significant digits.
        - For ``s``, the maximum number of characters.

    specifiers:
        ``c`` : character

        ``d`` or ``i`` : signed decimal integer

        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.

        ``f`` : decimal floating point

        ``g,G`` : use the shorter of ``e,E`` or ``f``

        ``o`` : signed octal

        ``s`` : string of characters

        ``u`` : unsigned decimal integer

        ``x,X`` : unsigned hexadecimal integer

    This explanation of ``fmt`` is not complete; for an exhaustive
    specification see [1]_.

    References
    ----------
    .. [1] `Format Specification Mini-Language
           <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
           Python Documentation.

    Examples
    --------
    >>> x = y = z = np.arange(0.0,5.0,1.0)
    >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
    >>> np.savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation

    """

    # Py3 conversions first
    if isinstance(fmt, bytes):
        fmt = asstr(fmt)
    delimiter = asstr(delimiter)

    class WriteWrap:
        """Convert to bytes on bytestream inputs.

        """
        def __init__(self, fh, encoding):
            self.fh = fh
            self.encoding = encoding
            self.do_write = self.first_write

        def close(self):
            self.fh.close()

        def write(self, v):
            self.do_write(v)

        def write_bytes(self, v):
            if isinstance(v, bytes):
                self.fh.write(v)
            else:
                self.fh.write(v.encode(self.encoding))

        def write_normal(self, v):
            self.fh.write(asunicode(v))

        def first_write(self, v):
            try:
                self.write_normal(v)
                self.write = self.write_normal
            except TypeError:
                # input is probably a bytestream
                self.write_bytes(v)
                self.write = self.write_bytes

    own_fh = False
    if isinstance(fname, os_PathLike):
        fname = os_fspath(fname)
    if _is_string_like(fname):
        # datasource doesn't support creating a new file ...
        open(fname, 'wt').close()
        fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
        own_fh = True
    elif hasattr(fname, 'write'):
        # wrap to handle byte output streams
        fh = WriteWrap(fname, encoding or 'latin1')
    else:
        raise ValueError('fname must be a string or file handle')

    try:
        X = np.asarray(X)

        # Handle 1-dimensional arrays
        if X.ndim == 0 or X.ndim > 2:
            raise ValueError(
                "Expected 1D or 2D array, got %dD array instead" % X.ndim)
        elif X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1

            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.names)
        else:
            ncol = X.shape[1]

        iscomplex_X = np.iscomplexobj(X)
        # `fmt` can be a string with multiple insertion points or a
        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
        if type(fmt) in (list, tuple):
            if len(fmt) != ncol:
                raise AttributeError('fmt has wrong shape. %s' % str(fmt))
            format = asstr(delimiter).join(map(asstr, fmt))
        elif isinstance(fmt, str):
            n_fmt_chars = fmt.count('%')
            error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
            if n_fmt_chars == 1:
                if iscomplex_X:
                    fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
                else:
                    fmt = [fmt, ] * ncol
                format = delimiter.join(fmt)
            elif iscomplex_X and n_fmt_chars != (2 * ncol):
                raise error
            elif ((not iscomplex_X) and n_fmt_chars != ncol):
                raise error
            else:
                format = fmt
        else:
            raise ValueError('invalid fmt: %r' % (fmt,))

        if len(header) > 0:
            header = header.replace('\n', '\n' + comments)
            fh.write(comments + header + newline)
        if iscomplex_X:
            for row in X:
                row2 = []
                for number in row:
                    row2.append(number.real)
                    row2.append(number.imag)
                s = format % tuple(row2) + newline
                fh.write(s.replace('+-', '-'))
        else:
            for row in X:
                try:
                    v = format % tuple(row) + newline
                except TypeError as e:
                    raise TypeError("Mismatch between array dtype ('%s') and "
                                    "format specifier ('%s')"
                                    % (str(X.dtype), format)) from e
                fh.write(v)

        if len(footer) > 0:
            footer = footer.replace('\n', '\n' + comments)
            fh.write(comments + footer + newline)
    finally:
        if own_fh:
            fh.close()
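

# Illustrative sketch (a demo helper, not part of this module's API): for
# complex input a single '%' format is expanded to one ``(real+imagj)`` pair
# per column, as implemented above, so a lone specifier is legal for complex
# data.
def _demo_savetxt_complex():
    import io
    buf = io.StringIO()
    np.savetxt(buf, np.array([[1 + 2j, 3 - 4j]]), fmt='%.1e')
    assert buf.getvalue().startswith(' (1.0e+00+2.0e+00j)')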


@set_module('numpy')
def fromregex(file, regexp, dtype, encoding=None):
    """
    Construct an array from a text file, using regular expression parsing.

    The returned array is always a structured array, and is constructed from
    all matches of the regular expression in the file. Groups in the regular
    expression are converted to fields of the structured array.

    Parameters
    ----------
    file : str or file
        Filename or file object to read.
    regexp : str or regexp
        Regular expression used to parse the file.
        Groups in the regular expression correspond to fields in the dtype.
    dtype : dtype or list of dtypes
        Dtype for the structured array.
    encoding : str, optional
        Encoding used to decode the inputfile. Does not apply to input
        streams.

        .. versionadded:: 1.14.0

    Returns
    -------
    output : ndarray
        The output array, containing the part of the content of `file` that
        was matched by `regexp`. `output` is always a structured array.

    Raises
    ------
    TypeError
        When `dtype` is not a valid dtype for a structured array.

    See Also
    --------
    fromstring, loadtxt

    Notes
    -----
    Dtypes for structured arrays can be specified in several forms, but all
    forms specify at least the data type and field name. For details see
    `basics.rec`.

    Examples
    --------
    >>> f = open('test.dat', 'w')
    >>> _ = f.write("1312 foo\\n1534 bar\\n444 qux")
    >>> f.close()

    >>> regexp = r"(\\d+)\\s+(...)"  # match [digits, whitespace, anything]
    >>> output = np.fromregex('test.dat', regexp,
    ...                       [('num', np.int64), ('key', 'S3')])
    >>> output
    array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
          dtype=[('num', '<i8'), ('key', 'S3')])
    >>> output['num']
    array([1312, 1534,  444])

    """
    own_fh = False
    if not hasattr(file, "read"):
        file = np.lib._datasource.open(file, 'rt', encoding=encoding)
        own_fh = True

    try:
        if not isinstance(dtype, np.dtype):
            dtype = np.dtype(dtype)

        content = file.read()
        if isinstance(content, bytes) and isinstance(regexp, np.compat.unicode):
            regexp = asbytes(regexp)
        elif isinstance(content, np.compat.unicode) and isinstance(regexp, bytes):
            regexp = asstr(regexp)

        if not hasattr(regexp, 'match'):
            regexp = re.compile(regexp)
        seq = regexp.findall(content)
        if seq and not isinstance(seq[0], tuple):
            # Only one group is in the regexp.
            # Create the new array as a single data-type and then
            # re-interpret as a single-field structured array.
            newdtype = np.dtype(dtype[dtype.names[0]])
            output = np.array(seq, dtype=newdtype)
            output.dtype = dtype
        else:
            output = np.array(seq, dtype=dtype)

        return output
    finally:
        if own_fh:
            file.close()
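

# Illustrative sketch (a demo helper, not part of this module's API): with a
# single-group pattern, ``fromregex`` builds a plain array first and then
# reinterprets it as a one-field structured array, as the branch above shows.
def _demo_fromregex_single_group():
    import io
    data = io.StringIO("id=10\nid=42\n")
    out = np.fromregex(data, r"id=(\d+)", [('id', np.int64)])
    assert out['id'].tolist() == [10, 42]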


#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------


def _genfromtxt_dispatcher(fname, dtype=None, comments=None, delimiter=None,
                           skip_header=None, skip_footer=None, converters=None,
                           missing_values=None, filling_values=None, usecols=None,
                           names=None, excludelist=None, deletechars=None,
                           replace_space=None, autostrip=None, case_sensitive=None,
                           defaultfmt=None, unpack=None, usemask=None, loose=None,
                           invalid_raise=None, max_rows=None, encoding=None, *,
                           like=None):
    return (like,)
  1307. @set_array_function_like_doc
  1308. @set_module('numpy')
  1309. def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
  1310. skip_header=0, skip_footer=0, converters=None,
  1311. missing_values=None, filling_values=None, usecols=None,
  1312. names=None, excludelist=None,
  1313. deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
  1314. replace_space='_', autostrip=False, case_sensitive=True,
  1315. defaultfmt="f%i", unpack=None, usemask=False, loose=True,
  1316. invalid_raise=True, max_rows=None, encoding='bytes', *,
  1317. like=None):
  1318. """
  1319. Load data from a text file, with missing values handled as specified.
  1320. Each line past the first `skip_header` lines is split at the `delimiter`
  1321. character, and characters following the `comments` character are discarded.
  1322. Parameters
  1323. ----------
  1324. fname : file, str, pathlib.Path, list of str, generator
  1325. File, filename, list, or generator to read. If the filename
  1326. extension is `.gz` or `.bz2`, the file is first decompressed. Note
  1327. that generators must return byte strings. The strings
  1328. in a list or produced by a generator are treated as lines.
  1329. dtype : dtype, optional
  1330. Data type of the resulting array.
  1331. If None, the dtypes will be determined by the contents of each
  1332. column, individually.
  1333. comments : str, optional
  1334. The character used to indicate the start of a comment.
  1335. All the characters occurring on a line after a comment are discarded.
  1336. delimiter : str, int, or sequence, optional
  1337. The string used to separate values. By default, any consecutive
  1338. whitespaces act as delimiter. An integer or sequence of integers
  1339. can also be provided as width(s) of each field.
  1340. skiprows : int, optional
  1341. `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
  1342. skip_header : int, optional
  1343. The number of lines to skip at the beginning of the file.
  1344. skip_footer : int, optional
  1345. The number of lines to skip at the end of the file.
  1346. converters : variable, optional
  1347. The set of functions that convert the data of a column to a value.
  1348. The converters can also be used to provide a default value
  1349. for missing data: ``converters = {3: lambda s: float(s or 0)}``.
  1350. missing : variable, optional
  1351. `missing` was removed in numpy 1.10. Please use `missing_values`
  1352. instead.
  1353. missing_values : variable, optional
  1354. The set of strings corresponding to missing data.
  1355. filling_values : variable, optional
  1356. The set of values to be used as default when the data are missing.
  1357. usecols : sequence, optional
  1358. Which columns to read, with 0 being the first. For example,
  1359. ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
  1360. names : {None, True, str, sequence}, optional
  1361. If `names` is True, the field names are read from the first line after
  1362. the first `skip_header` lines. This line can optionally be preceeded
  1363. by a comment delimiter. If `names` is a sequence or a single-string of
  1364. comma-separated names, the names will be used to define the field names
  1365. in a structured dtype. If `names` is None, the names of the dtype
  1366. fields will be used, if any.
  1367. excludelist : sequence, optional
  1368. A list of names to exclude. This list is appended to the default list
  1369. ['return','file','print']. Excluded names are appended with an
  1370. underscore: for example, `file` would become `file_`.
  1371. deletechars : str, optional
  1372. A string combining invalid characters that must be deleted from the
  1373. names.
  1374. defaultfmt : str, optional
  1375. A format used to define default field names, such as "f%i" or "f_%02i".
  1376. autostrip : bool, optional
  1377. Whether to automatically strip white spaces from the variables.
  1378. replace_space : char, optional
  1379. Character(s) used in replacement of white spaces in the variable
  1380. names. By default, use a '_'.
  1381. case_sensitive : {True, False, 'upper', 'lower'}, optional
  1382. If True, field names are case sensitive.
  1383. If False or 'upper', field names are converted to upper case.
  1384. If 'lower', field names are converted to lower case.
  1385. unpack : bool, optional
  1386. If True, the returned array is transposed, so that arguments may be
  1387. unpacked using ``x, y, z = genfromtxt(...)``. When used with a
  1388. structured data-type, arrays are returned for each field.
  1389. Default is False.
  1390. usemask : bool, optional
  1391. If True, return a masked array.
  1392. If False, return a regular array.
  1393. loose : bool, optional
  1394. If True, do not raise errors for invalid values.
  1395. invalid_raise : bool, optional
  1396. If True, an exception is raised if an inconsistency is detected in the
  1397. number of columns.
  1398. If False, a warning is emitted and the offending lines are skipped.
  1399. max_rows : int, optional
  1400. The maximum number of rows to read. Must not be used with skip_footer
  1401. at the same time. If given, the value must be at least 1. Default is
  1402. to read the entire file.
  1403. .. versionadded:: 1.10.0
  1404. encoding : str, optional
1405. Encoding used to decode the input file. Does not apply when `fname` is
1406. a file object. The special value 'bytes' enables backward-compatibility
1407. workarounds that ensure you receive byte arrays when possible
1408. and pass latin1-encoded strings to converters. Override this value to
1409. receive unicode arrays and pass strings as input to converters. If set
1410. to None, the system default is used. The default value is 'bytes'.
  1411. .. versionadded:: 1.14.0
  1412. ${ARRAY_FUNCTION_LIKE}
  1413. .. versionadded:: 1.20.0
  1414. Returns
  1415. -------
  1416. out : ndarray
  1417. Data read from the text file. If `usemask` is True, this is a
  1418. masked array.
  1419. See Also
  1420. --------
  1421. numpy.loadtxt : equivalent function when no data is missing.
  1422. Notes
  1423. -----
  1424. * When spaces are used as delimiters, or when no delimiter has been given
  1425. as input, there should not be any missing data between two fields.
  1426. * When the variables are named (either by a flexible dtype or with `names`),
  1427. there must not be any header in the file (else a ValueError
  1428. exception is raised).
  1429. * Individual values are not stripped of spaces by default.
  1430. When using a custom converter, make sure the function does remove spaces.
  1431. References
  1432. ----------
  1433. .. [1] NumPy User Guide, section `I/O with NumPy
  1434. <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
  1435. Examples
  1436. --------
  1437. >>> from io import StringIO
  1438. >>> import numpy as np
1439. Comma-delimited file with mixed dtype
  1440. >>> s = StringIO(u"1,1.3,abcde")
  1441. >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
  1442. ... ('mystring','S5')], delimiter=",")
  1443. >>> data
  1444. array((1, 1.3, b'abcde'),
  1445. dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
  1446. Using dtype = None
  1447. >>> _ = s.seek(0) # needed for StringIO example only
  1448. >>> data = np.genfromtxt(s, dtype=None,
  1449. ... names = ['myint','myfloat','mystring'], delimiter=",")
  1450. >>> data
  1451. array((1, 1.3, b'abcde'),
  1452. dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
  1453. Specifying dtype and names
  1454. >>> _ = s.seek(0)
  1455. >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
  1456. ... names=['myint','myfloat','mystring'], delimiter=",")
  1457. >>> data
  1458. array((1, 1.3, b'abcde'),
  1459. dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
  1460. An example with fixed-width columns
  1461. >>> s = StringIO(u"11.3abcde")
  1462. >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
  1463. ... delimiter=[1,3,5])
  1464. >>> data
  1465. array((1, 1.3, b'abcde'),
  1466. dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
  1467. An example to show comments
  1468. >>> f = StringIO('''
  1469. ... text,# of chars
  1470. ... hello world,11
  1471. ... numpy,5''')
  1472. >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
  1473. array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
  1474. dtype=[('f0', 'S12'), ('f1', 'S12')])
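An example showing how a missing value can be filled in (the empty
field is treated as missing by default; with the default dtype=float
the fill value is stored as a float):
>>> s = StringIO(u"1,,3\n4,5,6")
>>> np.genfromtxt(s, delimiter=",", filling_values=-99)
array([[  1., -99.,   3.],
       [  4.,   5.,   6.]])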
  1475. """
  1476. if like is not None:
  1477. return _genfromtxt_with_like(
  1478. fname, dtype=dtype, comments=comments, delimiter=delimiter,
  1479. skip_header=skip_header, skip_footer=skip_footer,
  1480. converters=converters, missing_values=missing_values,
  1481. filling_values=filling_values, usecols=usecols, names=names,
  1482. excludelist=excludelist, deletechars=deletechars,
  1483. replace_space=replace_space, autostrip=autostrip,
  1484. case_sensitive=case_sensitive, defaultfmt=defaultfmt,
  1485. unpack=unpack, usemask=usemask, loose=loose,
  1486. invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
  1487. like=like
  1488. )
  1489. if max_rows is not None:
  1490. if skip_footer:
  1491. raise ValueError(
  1492. "The keywords 'skip_footer' and 'max_rows' can not be "
  1493. "specified at the same time.")
  1494. if max_rows < 1:
  1495. raise ValueError("'max_rows' must be at least 1.")
  1496. if usemask:
  1497. from numpy.ma import MaskedArray, make_mask_descr
  1498. # Check the input dictionary of converters
  1499. user_converters = converters or {}
  1500. if not isinstance(user_converters, dict):
  1501. raise TypeError(
  1502. "The input argument 'converter' should be a valid dictionary "
  1503. "(got '%s' instead)" % type(user_converters))
  1504. if encoding == 'bytes':
  1505. encoding = None
  1506. byte_converters = True
  1507. else:
  1508. byte_converters = False
  1509. # Initialize the filehandle, the LineSplitter and the NameValidator
  1510. try:
  1511. if isinstance(fname, os_PathLike):
  1512. fname = os_fspath(fname)
  1513. if isinstance(fname, str):
  1514. fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
  1515. fid_ctx = contextlib.closing(fid)
  1516. else:
  1517. fid = fname
  1518. fid_ctx = contextlib.nullcontext(fid)
  1519. fhd = iter(fid)
  1520. except TypeError as e:
  1521. raise TypeError(
  1522. "fname must be a string, filehandle, list of strings, "
  1523. "or generator. Got %s instead." % type(fname)) from e
  1524. with fid_ctx:
  1525. split_line = LineSplitter(delimiter=delimiter, comments=comments,
  1526. autostrip=autostrip, encoding=encoding)
  1527. validate_names = NameValidator(excludelist=excludelist,
  1528. deletechars=deletechars,
  1529. case_sensitive=case_sensitive,
  1530. replace_space=replace_space)
  1531. # Skip the first `skip_header` rows
  1532. try:
  1533. for i in range(skip_header):
  1534. next(fhd)
  1535. # Keep on until we find the first valid values
  1536. first_values = None
  1537. while not first_values:
  1538. first_line = _decode_line(next(fhd), encoding)
  1539. if (names is True) and (comments is not None):
  1540. if comments in first_line:
  1541. first_line = (
  1542. ''.join(first_line.split(comments)[1:]))
  1543. first_values = split_line(first_line)
  1544. except StopIteration:
  1545. # return an empty array if the datafile is empty
  1546. first_line = ''
  1547. first_values = []
  1548. warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
1549. # Should we take the first values as names?
  1550. if names is True:
  1551. fval = first_values[0].strip()
  1552. if comments is not None:
  1553. if fval in comments:
  1554. del first_values[0]
  1555. # Check the columns to use: make sure `usecols` is a list
  1556. if usecols is not None:
  1557. try:
  1558. usecols = [_.strip() for _ in usecols.split(",")]
  1559. except AttributeError:
  1560. try:
  1561. usecols = list(usecols)
  1562. except TypeError:
  1563. usecols = [usecols, ]
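# At this point `usecols` is either None or a list of column indices
# and/or field names; names are mapped to indices below, once `names`
# itself has been validated.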
  1564. nbcols = len(usecols or first_values)
  1565. # Check the names and overwrite the dtype.names if needed
  1566. if names is True:
  1567. names = validate_names([str(_.strip()) for _ in first_values])
  1568. first_line = ''
  1569. elif _is_string_like(names):
  1570. names = validate_names([_.strip() for _ in names.split(',')])
  1571. elif names:
  1572. names = validate_names(names)
  1573. # Get the dtype
  1574. if dtype is not None:
  1575. dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
  1576. excludelist=excludelist,
  1577. deletechars=deletechars,
  1578. case_sensitive=case_sensitive,
  1579. replace_space=replace_space)
1580. # Make sure `names` is a list (for Python 2.5 compatibility)
  1581. if names is not None:
  1582. names = list(names)
  1583. if usecols:
  1584. for (i, current) in enumerate(usecols):
  1585. # if usecols is a list of names, convert to a list of indices
  1586. if _is_string_like(current):
  1587. usecols[i] = names.index(current)
  1588. elif current < 0:
  1589. usecols[i] = current + len(first_values)
  1590. # If the dtype is not None, make sure we update it
  1591. if (dtype is not None) and (len(dtype) > nbcols):
  1592. descr = dtype.descr
  1593. dtype = np.dtype([descr[_] for _ in usecols])
  1594. names = list(dtype.names)
  1595. # If `names` is not None, update the names
  1596. elif (names is not None) and (len(names) > nbcols):
  1597. names = [names[_] for _ in usecols]
  1598. elif (names is not None) and (dtype is not None):
  1599. names = list(dtype.names)
  1600. # Process the missing values ...............................
  1601. # Rename missing_values for convenience
  1602. user_missing_values = missing_values or ()
  1603. if isinstance(user_missing_values, bytes):
  1604. user_missing_values = user_missing_values.decode('latin1')
  1605. # Define the list of missing_values (one column: one list)
  1606. missing_values = [list(['']) for _ in range(nbcols)]
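# A dict maps a column (index or name) to its missing markers; the
# special key None applies its marker(s) to every column, e.g.
# missing_values={'b': 'N/A', None: '???'} (illustrative values).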
  1607. # We have a dictionary: process it field by field
  1608. if isinstance(user_missing_values, dict):
  1609. # Loop on the items
  1610. for (key, val) in user_missing_values.items():
1611. # Is the key a string?
  1612. if _is_string_like(key):
  1613. try:
  1614. # Transform it into an integer
  1615. key = names.index(key)
  1616. except ValueError:
  1617. # We couldn't find it: the name must have been dropped
  1618. continue
  1619. # Redefine the key as needed if it's a column number
  1620. if usecols:
  1621. try:
  1622. key = usecols.index(key)
  1623. except ValueError:
  1624. pass
1625. # Transform the value into a list of strings
  1626. if isinstance(val, (list, tuple)):
  1627. val = [str(_) for _ in val]
  1628. else:
  1629. val = [str(val), ]
  1630. # Add the value(s) to the current list of missing
  1631. if key is None:
  1632. # None acts as default
  1633. for miss in missing_values:
  1634. miss.extend(val)
  1635. else:
  1636. missing_values[key].extend(val)
  1637. # We have a sequence : each item matches a column
  1638. elif isinstance(user_missing_values, (list, tuple)):
  1639. for (value, entry) in zip(user_missing_values, missing_values):
  1640. value = str(value)
  1641. if value not in entry:
  1642. entry.append(value)
  1643. # We have a string : apply it to all entries
  1644. elif isinstance(user_missing_values, str):
  1645. user_value = user_missing_values.split(",")
  1646. for entry in missing_values:
  1647. entry.extend(user_value)
  1648. # We have something else: apply it to all entries
  1649. else:
  1650. for entry in missing_values:
  1651. entry.extend([str(user_missing_values)])
  1652. # Process the filling_values ...............................
  1653. # Rename the input for convenience
  1654. user_filling_values = filling_values
  1655. if user_filling_values is None:
  1656. user_filling_values = []
  1657. # Define the default
  1658. filling_values = [None] * nbcols
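# A fill of None lets each StringConverter fall back on the default
# associated with its (detected or requested) type.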
  1659. # We have a dictionary : update each entry individually
  1660. if isinstance(user_filling_values, dict):
  1661. for (key, val) in user_filling_values.items():
  1662. if _is_string_like(key):
  1663. try:
  1664. # Transform it into an integer
  1665. key = names.index(key)
  1666. except ValueError:
1667. # We couldn't find it: the name must have been dropped
  1668. continue
  1669. # Redefine the key if it's a column number and usecols is defined
  1670. if usecols:
  1671. try:
  1672. key = usecols.index(key)
  1673. except ValueError:
  1674. pass
  1675. # Add the value to the list
  1676. filling_values[key] = val
  1677. # We have a sequence : update on a one-to-one basis
  1678. elif isinstance(user_filling_values, (list, tuple)):
  1679. n = len(user_filling_values)
  1680. if (n <= nbcols):
  1681. filling_values[:n] = user_filling_values
  1682. else:
  1683. filling_values = user_filling_values[:nbcols]
  1684. # We have something else : use it for all entries
  1685. else:
  1686. filling_values = [user_filling_values] * nbcols
  1687. # Initialize the converters ................................
  1688. if dtype is None:
  1689. # Note: we can't use a [...]*nbcols, as we would have 3 times the same
  1690. # ... converter, instead of 3 different converters.
  1691. converters = [StringConverter(None, missing_values=miss, default=fill)
  1692. for (miss, fill) in zip(missing_values, filling_values)]
  1693. else:
  1694. dtype_flat = flatten_dtype(dtype, flatten_base=True)
  1695. # Initialize the converters
  1696. if len(dtype_flat) > 1:
  1697. # Flexible type : get a converter from each dtype
  1698. zipit = zip(dtype_flat, missing_values, filling_values)
  1699. converters = [StringConverter(dt, locked=True,
  1700. missing_values=miss, default=fill)
  1701. for (dt, miss, fill) in zipit]
  1702. else:
  1703. # Set to a default converter (but w/ different missing values)
  1704. zipit = zip(missing_values, filling_values)
  1705. converters = [StringConverter(dtype, locked=True,
  1706. missing_values=miss, default=fill)
  1707. for (miss, fill) in zipit]
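# With an explicit dtype the converters are created locked, i.e. they
# cannot be upgraded to another type later; with dtype=None they start
# unlocked and are upgraded once the data has been read (see below).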
  1708. # Update the converters to use the user-defined ones
  1709. uc_update = []
  1710. for (j, conv) in user_converters.items():
  1711. # If the converter is specified by column names, use the index instead
  1712. if _is_string_like(j):
  1713. try:
  1714. j = names.index(j)
  1715. i = j
  1716. except ValueError:
  1717. continue
  1718. elif usecols:
  1719. try:
  1720. i = usecols.index(j)
  1721. except ValueError:
  1722. # Unused converter specified
  1723. continue
  1724. else:
  1725. i = j
  1726. # Find the value to test - first_line is not filtered by usecols:
  1727. if len(first_line):
  1728. testing_value = first_values[j]
  1729. else:
  1730. testing_value = None
  1731. if conv is bytes:
  1732. user_conv = asbytes
  1733. elif byte_converters:
1734. # converters may use decode to work around numpy's old behaviour,
1735. # so encode the string again before passing it to the user converter
  1736. def tobytes_first(x, conv):
  1737. if type(x) is bytes:
  1738. return conv(x)
  1739. return conv(x.encode("latin1"))
  1740. user_conv = functools.partial(tobytes_first, conv=conv)
  1741. else:
  1742. user_conv = conv
  1743. converters[i].update(user_conv, locked=True,
  1744. testing_value=testing_value,
  1745. default=filling_values[i],
  1746. missing_values=missing_values[i],)
  1747. uc_update.append((i, user_conv))
  1748. # Make sure we have the corrected keys in user_converters...
  1749. user_converters.update(uc_update)
  1750. # Fixme: possible error as following variable never used.
  1751. # miss_chars = [_.missing_values for _ in converters]
  1752. # Initialize the output lists ...
  1753. # ... rows
  1754. rows = []
  1755. append_to_rows = rows.append
  1756. # ... masks
  1757. if usemask:
  1758. masks = []
  1759. append_to_masks = masks.append
  1760. # ... invalid
  1761. invalid = []
  1762. append_to_invalid = invalid.append
  1763. # Parse each line
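# (first_line is replayed in front of the remaining lines; it is the
# empty string when it only held the header, and empty lines are
# skipped below)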
  1764. for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
  1765. values = split_line(line)
  1766. nbvalues = len(values)
  1767. # Skip an empty line
  1768. if nbvalues == 0:
  1769. continue
  1770. if usecols:
  1771. # Select only the columns we need
  1772. try:
  1773. values = [values[_] for _ in usecols]
  1774. except IndexError:
  1775. append_to_invalid((i + skip_header + 1, nbvalues))
  1776. continue
  1777. elif nbvalues != nbcols:
  1778. append_to_invalid((i + skip_header + 1, nbvalues))
  1779. continue
  1780. # Store the values
  1781. append_to_rows(tuple(values))
  1782. if usemask:
  1783. append_to_masks(tuple([v.strip() in m
  1784. for (v, m) in zip(values,
  1785. missing_values)]))
  1786. if len(rows) == max_rows:
  1787. break
  1788. # Upgrade the converters (if needed)
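# With dtype=None, each unlocked converter widens its type (roughly
# bool -> int -> float -> complex -> string) until every value seen in
# its column can be converted.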
  1789. if dtype is None:
  1790. for (i, converter) in enumerate(converters):
  1791. current_column = [itemgetter(i)(_m) for _m in rows]
  1792. try:
  1793. converter.iterupgrade(current_column)
  1794. except ConverterLockError:
  1795. errmsg = "Converter #%i is locked and cannot be upgraded: " % i
  1796. current_column = map(itemgetter(i), rows)
  1797. for (j, value) in enumerate(current_column):
  1798. try:
  1799. converter.upgrade(value)
  1800. except (ConverterError, ValueError):
  1801. errmsg += "(occurred line #%i for value '%s')"
  1802. errmsg %= (j + 1 + skip_header, value)
  1803. raise ConverterError(errmsg)
  1804. # Check that we don't have invalid values
  1805. nbinvalid = len(invalid)
  1806. if nbinvalid > 0:
  1807. nbrows = len(rows) + nbinvalid - skip_footer
  1808. # Construct the error message
  1809. template = " Line #%%i (got %%i columns instead of %i)" % nbcols
  1810. if skip_footer > 0:
  1811. nbinvalid_skipped = len([_ for _ in invalid
  1812. if _[0] > nbrows + skip_header])
  1813. invalid = invalid[:nbinvalid - nbinvalid_skipped]
  1814. skip_footer -= nbinvalid_skipped
  1815. #
  1816. # nbrows -= skip_footer
  1817. # errmsg = [template % (i, nb)
  1818. # for (i, nb) in invalid if i < nbrows]
  1819. # else:
  1820. errmsg = [template % (i, nb)
  1821. for (i, nb) in invalid]
  1822. if len(errmsg):
1823. errmsg.insert(0, "Some errors were detected!")
  1824. errmsg = "\n".join(errmsg)
1825. # Raise an exception?
  1826. if invalid_raise:
  1827. raise ValueError(errmsg)
1828. # Issue a warning?
  1829. else:
  1830. warnings.warn(errmsg, ConversionWarning, stacklevel=2)
  1831. # Strip the last skip_footer data
  1832. if skip_footer > 0:
  1833. rows = rows[:-skip_footer]
  1834. if usemask:
  1835. masks = masks[:-skip_footer]
  1836. # Convert each value according to the converter:
  1837. # We want to modify the list in place to avoid creating a new one...
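# (_loose_call returns the converter's default when a value cannot be
# converted, while _strict_call raises a ValueError)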
  1838. if loose:
  1839. rows = list(
  1840. zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
  1841. for (i, conv) in enumerate(converters)]))
  1842. else:
  1843. rows = list(
  1844. zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
  1845. for (i, conv) in enumerate(converters)]))
  1846. # Reset the dtype
  1847. data = rows
  1848. if dtype is None:
  1849. # Get the dtypes from the types of the converters
  1850. column_types = [conv.type for conv in converters]
  1851. # Find the columns with strings...
  1852. strcolidx = [i for (i, v) in enumerate(column_types)
  1853. if v == np.unicode_]
  1854. if byte_converters and strcolidx:
  1855. # convert strings back to bytes for backward compatibility
  1856. warnings.warn(
  1857. "Reading unicode strings without specifying the encoding "
  1858. "argument is deprecated. Set the encoding, use None for the "
  1859. "system default.",
  1860. np.VisibleDeprecationWarning, stacklevel=2)
  1861. def encode_unicode_cols(row_tup):
  1862. row = list(row_tup)
  1863. for i in strcolidx:
  1864. row[i] = row[i].encode('latin1')
  1865. return tuple(row)
  1866. try:
  1867. data = [encode_unicode_cols(r) for r in data]
  1868. except UnicodeEncodeError:
  1869. pass
  1870. else:
  1871. for i in strcolidx:
  1872. column_types[i] = np.bytes_
  1873. # Update string types to be the right length
  1874. sized_column_types = column_types[:]
  1875. for i, col_type in enumerate(column_types):
  1876. if np.issubdtype(col_type, np.character):
  1877. n_chars = max(len(row[i]) for row in data)
  1878. sized_column_types[i] = (col_type, n_chars)
  1879. if names is None:
  1880. # If the dtype is uniform (before sizing strings)
  1881. base = {
  1882. c_type
  1883. for c, c_type in zip(converters, column_types)
  1884. if c._checked}
  1885. if len(base) == 1:
  1886. uniform_type, = base
  1887. (ddtype, mdtype) = (uniform_type, bool)
  1888. else:
  1889. ddtype = [(defaultfmt % i, dt)
  1890. for (i, dt) in enumerate(sized_column_types)]
  1891. if usemask:
  1892. mdtype = [(defaultfmt % i, bool)
  1893. for (i, dt) in enumerate(sized_column_types)]
  1894. else:
  1895. ddtype = list(zip(names, sized_column_types))
  1896. mdtype = list(zip(names, [bool] * len(sized_column_types)))
  1897. output = np.array(data, dtype=ddtype)
  1898. if usemask:
  1899. outputmask = np.array(masks, dtype=mdtype)
  1900. else:
  1901. # Overwrite the initial dtype names if needed
  1902. if names and dtype.names is not None:
  1903. dtype.names = names
  1904. # Case 1. We have a structured type
  1905. if len(dtype_flat) > 1:
1906. # Nested dtype, e.g. [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
  1907. # First, create the array using a flattened dtype:
  1908. # [('a', int), ('b1', int), ('b2', float)]
  1909. # Then, view the array using the specified dtype.
  1910. if 'O' in (_.char for _ in dtype_flat):
  1911. if has_nested_fields(dtype):
  1912. raise NotImplementedError(
  1913. "Nested fields involving objects are not supported...")
  1914. else:
  1915. output = np.array(data, dtype=dtype)
  1916. else:
  1917. rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
  1918. output = rows.view(dtype)
  1919. # Now, process the rowmasks the same way
  1920. if usemask:
  1921. rowmasks = np.array(
  1922. masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
  1923. # Construct the new dtype
  1924. mdtype = make_mask_descr(dtype)
  1925. outputmask = rowmasks.view(mdtype)
  1926. # Case #2. We have a basic dtype
  1927. else:
  1928. # We used some user-defined converters
  1929. if user_converters:
  1930. ishomogeneous = True
  1931. descr = []
  1932. for i, ttype in enumerate([conv.type for conv in converters]):
  1933. # Keep the dtype of the current converter
  1934. if i in user_converters:
  1935. ishomogeneous &= (ttype == dtype.type)
  1936. if np.issubdtype(ttype, np.character):
  1937. ttype = (ttype, max(len(row[i]) for row in data))
  1938. descr.append(('', ttype))
  1939. else:
  1940. descr.append(('', dtype))
1941. # So we changed the dtype?
  1942. if not ishomogeneous:
  1943. # We have more than one field
  1944. if len(descr) > 1:
  1945. dtype = np.dtype(descr)
  1946. # We have only one field: drop the name if not needed.
  1947. else:
  1948. dtype = np.dtype(ttype)
  1949. #
  1950. output = np.array(data, dtype)
  1951. if usemask:
  1952. if dtype.names is not None:
  1953. mdtype = [(_, bool) for _ in dtype.names]
  1954. else:
  1955. mdtype = bool
  1956. outputmask = np.array(masks, dtype=mdtype)
  1957. # Try to take care of the missing data we missed
  1958. names = output.dtype.names
  1959. if usemask and names:
  1960. for (name, conv) in zip(names, converters):
  1961. missing_values = [conv(_) for _ in conv.missing_values
  1962. if _ != '']
  1963. for mval in missing_values:
  1964. outputmask[name] |= (output[name] == mval)
  1965. # Construct the final array
  1966. if usemask:
  1967. output = output.view(MaskedArray)
  1968. output._mask = outputmask
  1969. output = np.squeeze(output)
  1970. if unpack:
  1971. if names is None:
  1972. return output.T
  1973. elif len(names) == 1:
  1974. # squeeze single-name dtypes too
  1975. return output[names[0]]
  1976. else:
  1977. # For structured arrays with multiple fields,
  1978. # return an array for each field.
  1979. return [output[field] for field in names]
  1980. return output
  1981. _genfromtxt_with_like = array_function_dispatch(
  1982. _genfromtxt_dispatcher
  1983. )(genfromtxt)
  1984. def ndfromtxt(fname, **kwargs):
  1985. """
  1986. Load ASCII data stored in a file and return it as a single array.
  1987. .. deprecated:: 1.17
1988. `ndfromtxt` is a deprecated alias of `genfromtxt` which
  1989. overwrites the ``usemask`` argument with `False` even when
  1990. explicitly called as ``ndfromtxt(..., usemask=True)``.
  1991. Use `genfromtxt` instead.
  1992. Parameters
  1993. ----------
  1994. fname, kwargs : For a description of input parameters, see `genfromtxt`.
  1995. See Also
  1996. --------
  1997. numpy.genfromtxt : generic function.
  1998. """
  1999. kwargs['usemask'] = False
  2000. # Numpy 1.17
  2001. warnings.warn(
  2002. "np.ndfromtxt is a deprecated alias of np.genfromtxt, "
  2003. "prefer the latter.",
  2004. DeprecationWarning, stacklevel=2)
  2005. return genfromtxt(fname, **kwargs)
  2006. def mafromtxt(fname, **kwargs):
  2007. """
  2008. Load ASCII data stored in a text file and return a masked array.
  2009. .. deprecated:: 1.17
  2010. np.mafromtxt is a deprecated alias of `genfromtxt` which
  2011. overwrites the ``usemask`` argument with `True` even when
  2012. explicitly called as ``mafromtxt(..., usemask=False)``.
  2013. Use `genfromtxt` instead.
  2014. Parameters
  2015. ----------
  2016. fname, kwargs : For a description of input parameters, see `genfromtxt`.
  2017. See Also
  2018. --------
  2019. numpy.genfromtxt : generic function to load ASCII data.
  2020. """
  2021. kwargs['usemask'] = True
  2022. # Numpy 1.17
  2023. warnings.warn(
  2024. "np.mafromtxt is a deprecated alias of np.genfromtxt, "
  2025. "prefer the latter.",
  2026. DeprecationWarning, stacklevel=2)
  2027. return genfromtxt(fname, **kwargs)
  2028. def recfromtxt(fname, **kwargs):
  2029. """
  2030. Load ASCII data from a file and return it in a record array.
2031. If ``usemask=False`` a standard `recarray` is returned;
2032. if ``usemask=True`` a MaskedRecords array is returned.
  2033. Parameters
  2034. ----------
  2035. fname, kwargs : For a description of input parameters, see `genfromtxt`.
  2036. See Also
  2037. --------
  2038. numpy.genfromtxt : generic function
  2039. Notes
  2040. -----
  2041. By default, `dtype` is None, which means that the data-type of the output
  2042. array will be determined from the data.
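Examples
--------
A minimal sketch, assuming a platform whose default integer is 64 bits
(the integer dtype may display as '<i4' elsewhere):
>>> from io import StringIO
>>> s = StringIO(u"1 2.5\n3 4.5")
>>> np.recfromtxt(s, names="a,b", encoding=None)
rec.array([(1, 2.5), (3, 4.5)],
          dtype=[('a', '<i8'), ('b', '<f8')])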
  2043. """
  2044. kwargs.setdefault("dtype", None)
  2045. usemask = kwargs.get('usemask', False)
  2046. output = genfromtxt(fname, **kwargs)
  2047. if usemask:
  2048. from numpy.ma.mrecords import MaskedRecords
  2049. output = output.view(MaskedRecords)
  2050. else:
  2051. output = output.view(np.recarray)
  2052. return output
  2053. def recfromcsv(fname, **kwargs):
  2054. """
  2055. Load ASCII data stored in a comma-separated file.
  2056. The returned array is a record array (if ``usemask=False``, see
  2057. `recarray`) or a masked record array (if ``usemask=True``,
  2058. see `ma.mrecords.MaskedRecords`).
  2059. Parameters
  2060. ----------
  2061. fname, kwargs : For a description of input parameters, see `genfromtxt`.
  2062. See Also
  2063. --------
  2064. numpy.genfromtxt : generic function to load ASCII data.
  2065. Notes
  2066. -----
  2067. By default, `dtype` is None, which means that the data-type of the output
  2068. array will be determined from the data.
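Examples
--------
A minimal sketch; the header row supplies the field names (lower-cased
by default) and the integer width is platform dependent:
>>> from io import StringIO
>>> s = StringIO(u"A,B\n0,1\n2,3")
>>> np.recfromcsv(s, encoding=None)
rec.array([(0, 1), (2, 3)],
          dtype=[('a', '<i8'), ('b', '<i8')])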
  2069. """
  2070. # Set default kwargs for genfromtxt as relevant to csv import.
  2071. kwargs.setdefault("case_sensitive", "lower")
  2072. kwargs.setdefault("names", True)
  2073. kwargs.setdefault("delimiter", ",")
  2074. kwargs.setdefault("dtype", None)
  2075. output = genfromtxt(fname, **kwargs)
  2076. usemask = kwargs.get("usemask", False)
  2077. if usemask:
  2078. from numpy.ma.mrecords import MaskedRecords
  2079. output = output.view(MaskedRecords)
  2080. else:
  2081. output = output.view(np.recarray)
  2082. return output