from __future__ import division, absolute_import, print_function

import io
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter, index as opindex

import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
    LineSplitter, NameValidator, StringConverter, ConverterError,
    ConverterLockError, ConversionWarning, _is_string_like,
    has_nested_fields, flatten_dtype, easy_dtype, _decode_line
    )

from numpy.compat import (
    asbytes, asstr, asunicode, asbytes_nested, bytes, basestring, unicode,
    is_pathlib_path
    )

if sys.version_info[0] >= 3:
    import pickle
else:
    import cPickle as pickle
    from future_builtins import map

loads = pickle.loads

__all__ = [
    'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
    'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
    'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
    ]

class BagObj(object):
    """
    BagObj(obj)

    Convert attribute look-ups to getitems on the object passed in.

    Parameters
    ----------
    obj : class instance
        Object on which attribute look-up is performed.

    Examples
    --------
    >>> from numpy.lib.npyio import BagObj as BO
    >>> class BagDemo(object):
    ...     def __getitem__(self, key): # An instance of BagObj(BagDemo)
    ...                                 # will call this method when any
    ...                                 # attribute look-up is required
    ...         result = "Doesn't matter what you want, "
    ...         return result + "you're gonna get this"
    ...
    >>> demo_obj = BagDemo()
    >>> bagobj = BO(demo_obj)
    >>> bagobj.hello_there
    "Doesn't matter what you want, you're gonna get this"
    >>> bagobj.I_can_be_anything
    "Doesn't matter what you want, you're gonna get this"

    """

    def __init__(self, obj):
        # Use weakref to make NpzFile objects collectable by refcount
        self._obj = weakref.proxy(obj)

    def __getattribute__(self, key):
        try:
            return object.__getattribute__(self, '_obj')[key]
        except KeyError:
            raise AttributeError(key)

    def __dir__(self):
        """
        Enables dir(bagobj) to list the files in an NpzFile.

        This also enables tab-completion in an interpreter or IPython.
        """
        return object.__getattribute__(self, '_obj').keys()

def zipfile_factory(file, *args, **kwargs):
    """
    Create a ZipFile.

    Allows for Zip64, and the `file` argument can accept file, str, or
    pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
    constructor.
    """
    if is_pathlib_path(file):
        file = str(file)
    import zipfile
    kwargs['allowZip64'] = True
    return zipfile.ZipFile(file, *args, **kwargs)

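# A minimal usage sketch of zipfile_factory (hypothetical file name), kept as
# a comment so nothing runs at import time:
#
#     zipf = zipfile_factory('archive.npz', mode='w')   # Zip64 is enabled
#     zipf.close()
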
class NpzFile(object):
    """
    NpzFile(fid)

    A dictionary-like object with lazy-loading of files in the zipped
    archive provided on construction.

    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension;
    other files are ignored.

    The arrays and file strings are lazily loaded on either
    getitem access using ``obj['key']`` or attribute lookup using
    ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
    be obtained with ``obj.files`` and the ZipFile object itself using
    ``obj.zip``.

    Attributes
    ----------
    files : list of str
        List of all files in the archive with a ``.npy`` extension.
    zip : ZipFile instance
        The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        An object on which attribute look-up can be performed as an
        alternative to getitem access on the `NpzFile` instance itself.
    allow_pickle : bool, optional
        Allow loading pickled data. Default: True
    pickle_kwargs : dict, optional
        Additional keyword arguments to pass on to pickle.load.
        These are only useful when loading object arrays saved on
        Python 2 when using Python 3.

    Parameters
    ----------
    fid : file or str
        The zipped archive to open. This is either a file-like object
        or a string containing the path to the archive.
    own_fid : bool, optional
        Whether NpzFile should close the file handle.
        Requires that `fid` is a file-like object.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)

    >>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.npyio.NpzFile)
    True
    >>> npz.files
    ['y', 'x']
    >>> npz['x']  # getitem access
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> npz.f.x  # attribute lookup
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """

    def __init__(self, fid, own_fid=False, allow_pickle=True,
                 pickle_kwargs=None):
        # Import is postponed to here since zipfile depends on gzip, an
        # optional component of the so-called standard library.
        _zip = zipfile_factory(fid)
        self._files = _zip.namelist()
        self.files = []
        self.allow_pickle = allow_pickle
        self.pickle_kwargs = pickle_kwargs
        for x in self._files:
            if x.endswith('.npy'):
                self.files.append(x[:-4])
            else:
                self.files.append(x)
        self.zip = _zip
        self.f = BagObj(self)
        if own_fid:
            self.fid = fid
        else:
            self.fid = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """
        Close the file.

        """
        if self.zip is not None:
            self.zip.close()
            self.zip = None
        if self.fid is not None:
            self.fid.close()
            self.fid = None
        self.f = None  # break reference cycle

    def __del__(self):
        self.close()

    def __getitem__(self, key):
        # FIXME: This seems like it will copy strings around
        #   more than is strictly necessary. The zipfile
        #   will read the string and then
        #   the format.read_array will copy the string
        #   to another place in memory.
        #   It would be better if the zipfile could read
        #   (or at least uncompress) the data
        #   directly into the array memory.
        member = False
        if key in self._files:
            member = True
        elif key in self.files:
            member = True
            key += '.npy'
        if member:
            bytes = self.zip.open(key)
            magic = bytes.read(len(format.MAGIC_PREFIX))
            bytes.close()
            if magic == format.MAGIC_PREFIX:
                bytes = self.zip.open(key)
                return format.read_array(bytes,
                                         allow_pickle=self.allow_pickle,
                                         pickle_kwargs=self.pickle_kwargs)
            else:
                return self.zip.read(key)
        else:
            raise KeyError("%s is not a file in the archive" % key)

    def __iter__(self):
        return iter(self.files)

    def items(self):
        """
        Return a list of tuples, with each tuple (filename, array in file).

        """
        return [(f, self[f]) for f in self.files]

    def iteritems(self):
        """Generator that returns tuples (filename, array in file)."""
        for f in self.files:
            yield (f, self[f])

    def keys(self):
        """Return files in the archive with a ``.npy`` extension."""
        return self.files

    def iterkeys(self):
        """Return an iterator over the files in the archive."""
        return self.__iter__()

    def __contains__(self, key):
        return self.files.__contains__(key)

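# Access-pattern sketch for NpzFile (hypothetical archive name), kept as a
# comment: arrays are decompressed and parsed only on first access.
#
#     npz = np.load('data.npz')   # lazy; no array parsed yet
#     a = npz['a']                # parsed here
#     a2 = npz.f.a                # equivalent attribute-style lookup
#     npz.close()
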
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
         encoding='ASCII'):
    """
    Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.

    Parameters
    ----------
    file : file-like object, string, or pathlib.Path
        The file to read. File-like objects must support the
        ``seek()`` and ``read()`` methods. Pickled files require that the
        file-like object support the ``readline()`` method as well.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, then memory-map the file, using the given mode (see
        `numpy.memmap` for a detailed description of the modes). A
        memory-mapped array is kept on disk. However, it can be accessed
        and sliced like any ndarray. Memory mapping is especially useful
        for accessing small fragments of large files without reading the
        entire file into memory.
    allow_pickle : bool, optional
        Allow loading pickled object arrays stored in npy files. Reasons for
        disallowing pickles include security, as loading pickled data can
        execute arbitrary code. If pickles are disallowed, loading object
        arrays will fail.
        Default: True
    fix_imports : bool, optional
        Only useful when loading Python 2 generated pickled files on Python 3,
        which includes npy/npz files containing object arrays. If `fix_imports`
        is True, pickle will try to map the old Python 2 names to the new names
        used in Python 3.
    encoding : str, optional
        What encoding to use when reading Python 2 strings. Only useful when
        loading Python 2 generated pickled files in Python 3, which includes
        npy/npz files containing object arrays. Values other than 'latin1',
        'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
        data. Default: 'ASCII'

    Returns
    -------
    result : array, tuple, dict, etc.
        Data stored in the file. For ``.npz`` files, the returned instance
        of NpzFile class must be closed to avoid leaking file descriptors.

    Raises
    ------
    IOError
        If the input file does not exist or cannot be read.
    ValueError
        The file contains an object array, but ``allow_pickle=False`` was
        given.

    See Also
    --------
    save, savez, savez_compressed, loadtxt
    memmap : Create a memory-map to an array stored in a file on disk.
    lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.

    Notes
    -----
    - If the file contains pickle data, then whatever object is stored
      in the pickle is returned.
    - If the file is a ``.npy`` file, then a single array is returned.
    - If the file is a ``.npz`` file, then a dictionary-like object is
      returned, containing ``{filename: array}`` key-value pairs, one for
      each file in the archive.
    - If the file is a ``.npz`` file, the returned value supports the
      context manager protocol in a similar fashion to the open function::

          with load('foo.npz') as data:
              a = data['a']

      The underlying file descriptor is closed when exiting the 'with'
      block.

    Examples
    --------
    Store data to disk, and load it again:

    >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
    >>> np.load('/tmp/123.npy')
    array([[1, 2, 3],
           [4, 5, 6]])

    Store compressed data to disk, and load it again:

    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
    >>> b = np.array([1, 2])
    >>> np.savez('/tmp/123.npz', a=a, b=b)
    >>> data = np.load('/tmp/123.npz')
    >>> data['a']
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> data['b']
    array([1, 2])
    >>> data.close()

    Mem-map the stored array, and then access the second row
    directly from disk:

    >>> X = np.load('/tmp/123.npy', mmap_mode='r')
    >>> X[1, :]
    memmap([4, 5, 6])

    """
    own_fid = False
    if isinstance(file, basestring):
        fid = open(file, "rb")
        own_fid = True
    elif is_pathlib_path(file):
        fid = file.open("rb")
        own_fid = True
    else:
        fid = file

    if encoding not in ('ASCII', 'latin1', 'bytes'):
        # The 'encoding' value for pickle also affects what encoding
        # the serialized binary data of NumPy arrays is loaded
        # in. Pickle does not pass on the encoding information to
        # NumPy. The unpickling code in numpy.core.multiarray is
        # written to assume that unicode data appearing where binary
        # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
        #
        # Other encoding values can corrupt binary data, and we
        # purposefully disallow them. For the same reason, the errors=
        # argument is not exposed, as values other than 'strict' can
        # similarly silently corrupt numerical data.
        raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")

    if sys.version_info[0] >= 3:
        pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
    else:
        # Nothing to do on Python 2
        pickle_kwargs = {}

    try:
        # Code to distinguish NumPy binary files from pickles.
        _ZIP_PREFIX = b'PK\x03\x04'
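        # For concrete reference (format.MAGIC_PREFIX is defined in
        # numpy.lib.format):
        #   b'PK\x03\x04'  -> zip archive, treated as .npz
        #   b'\x93NUMPY'   -> .npy array header
        # Anything else falls through to the pickle branch below.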
        N = len(format.MAGIC_PREFIX)
        magic = fid.read(N)
        # If the file size is less than N, we need to make sure not
        # to seek past the beginning of the file
        fid.seek(-min(N, len(magic)), 1)  # back-up
        if magic.startswith(_ZIP_PREFIX):
            # zip-file (assume .npz)
            # Transfer file ownership to NpzFile
            tmp = own_fid
            own_fid = False
            return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
                           pickle_kwargs=pickle_kwargs)
        elif magic == format.MAGIC_PREFIX:
            # .npy file
            if mmap_mode:
                return format.open_memmap(file, mode=mmap_mode)
            else:
                return format.read_array(fid, allow_pickle=allow_pickle,
                                         pickle_kwargs=pickle_kwargs)
        else:
            # Try a pickle
            if not allow_pickle:
                raise ValueError("Cannot load file containing pickled data "
                                 "when allow_pickle=False")
            try:
                return pickle.load(fid, **pickle_kwargs)
            except Exception:
                raise IOError(
                    "Failed to interpret file %s as a pickle" % repr(file))
    finally:
        if own_fid:
            fid.close()

def save(file, arr, allow_pickle=True, fix_imports=True):
    """
    Save an array to a binary file in NumPy ``.npy`` format.

    Parameters
    ----------
    file : file, str, or pathlib.Path
        File or filename to which the data is saved. If file is a file-object,
        then the filename is unchanged. If file is a string or Path, a ``.npy``
        extension will be appended to the file name if it does not already
        have one.
    arr : array_like
        Array data to be saved.
    allow_pickle : bool, optional
        Allow saving object arrays using Python pickles. Reasons for
        disallowing pickles include security (loading pickled data can execute
        arbitrary code) and portability (pickled objects may not be loadable
        on different Python installations, for example if the stored objects
        require libraries that are not available, and not all pickled data is
        compatible between Python 2 and Python 3).
        Default: True
    fix_imports : bool, optional
        Only useful in forcing objects in object arrays on Python 3 to be
        pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
        will try to map the new Python 3 names to the old module names used in
        Python 2, so that the pickle data stream is readable with Python 2.

    See Also
    --------
    savez : Save several arrays into a ``.npz`` archive
    savetxt, load

    Notes
    -----
    For a description of the ``.npy`` format, see the module docstring
    of `numpy.lib.format` or the NumPy Enhancement Proposal
    http://docs.scipy.org/doc/numpy/neps/npy-format.html

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()

    >>> x = np.arange(10)
    >>> np.save(outfile, x)

    >>> outfile.seek(0)  # Only needed here to simulate closing & reopening file
    >>> np.load(outfile)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    own_fid = False
    if isinstance(file, basestring):
        if not file.endswith('.npy'):
            file = file + '.npy'
        fid = open(file, "wb")
        own_fid = True
    elif is_pathlib_path(file):
        if not file.name.endswith('.npy'):
            file = file.parent / (file.name + '.npy')
        fid = file.open("wb")
        own_fid = True
    else:
        fid = file

    if sys.version_info[0] >= 3:
        pickle_kwargs = dict(fix_imports=fix_imports)
    else:
        # Nothing to do on Python 2
        pickle_kwargs = None

    try:
        arr = np.asanyarray(arr)
        format.write_array(fid, arr, allow_pickle=allow_pickle,
                           pickle_kwargs=pickle_kwargs)
    finally:
        if own_fid:
            fid.close()

def savez(file, *args, **kwds):
    """
    Save several arrays into a single file in uncompressed ``.npz`` format.

    If arguments are passed in with no keywords, the corresponding variable
    names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
    arguments are given, the corresponding variable names, in the ``.npz``
    file will match the keyword names.

    Parameters
    ----------
    file : str or file
        Either the file name (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the file name if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Since it is not possible for Python to
        know the names of the arrays outside `savez`, the arrays will be saved
        with names "arr_0", "arr_1", and so on. These arguments can be any
        expression.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Arrays will be saved in the file with the
        keyword names.

    Returns
    -------
    None

    See Also
    --------
    save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    savez_compressed : Save several arrays into a compressed ``.npz`` archive

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain. The archive is not compressed and each file
    in the archive contains one variable in ``.npy`` format. For a
    description of the ``.npy`` format, see `numpy.lib.format` or the
    NumPy Enhancement Proposal
    http://docs.scipy.org/doc/numpy/neps/npy-format.html

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)

    Using `savez` with \\*args, the arrays are saved with default names.

    >>> np.savez(outfile, x, y)
    >>> outfile.seek(0)  # Only needed here to simulate closing & reopening file
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['arr_1', 'arr_0']
    >>> npzfile['arr_0']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    Using `savez` with \\**kwds, the arrays are saved with the keyword names.

    >>> outfile = TemporaryFile()
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['y', 'x']
    >>> npzfile['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    _savez(file, args, kwds, False)

def savez_compressed(file, *args, **kwds):
    """
    Save several arrays into a single file in compressed ``.npz`` format.

    If keyword arguments are given, then filenames are taken from the keywords.
    If arguments are passed in with no keywords, then stored file names are
    arr_0, arr_1, etc.

    Parameters
    ----------
    file : str or file
        Either the file name (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the file name if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Since it is not possible for Python to
        know the names of the arrays outside `savez`, the arrays will be saved
        with names "arr_0", "arr_1", and so on. These arguments can be any
        expression.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Arrays will be saved in the file with the
        keyword names.

    Returns
    -------
    None

    See Also
    --------
    numpy.save : Save a single array to a binary file in NumPy format.
    numpy.savetxt : Save an array to a file as plain text.
    numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
    numpy.load : Load the files created by savez_compressed.

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain. The archive is compressed with
    ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
    in ``.npy`` format. For a description of the ``.npy`` format, see
    `numpy.lib.format` or the NumPy Enhancement Proposal
    http://docs.scipy.org/doc/numpy/neps/npy-format.html

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Examples
    --------
    >>> test_array = np.random.rand(3, 2)
    >>> test_vector = np.random.rand(4)
    >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
    >>> loaded = np.load('/tmp/123.npz')
    >>> print(np.array_equal(test_array, loaded['a']))
    True
    >>> print(np.array_equal(test_vector, loaded['b']))
    True

    """
    _savez(file, args, kwds, True)

def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile

    if isinstance(file, basestring):
        if not file.endswith('.npz'):
            file = file + '.npz'
    elif is_pathlib_path(file):
        if not file.name.endswith('.npz'):
            file = file.parent / (file.name + '.npz')

    namedict = kwds
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        if key in namedict.keys():
            raise ValueError(
                "Cannot use un-named variables and keyword %s" % key)
        namedict[key] = val

    if compress:
        compression = zipfile.ZIP_DEFLATED
    else:
        compression = zipfile.ZIP_STORED

    zipf = zipfile_factory(file, mode="w", compression=compression)

    if sys.version_info >= (3, 6):
        # Since Python 3.6 it is possible to write directly to a ZIP file.
        for key, val in namedict.items():
            fname = key + '.npy'
            val = np.asanyarray(val)
            force_zip64 = val.nbytes >= 2**30
            with zipf.open(fname, 'w', force_zip64=force_zip64) as fid:
                format.write_array(fid, val,
                                   allow_pickle=allow_pickle,
                                   pickle_kwargs=pickle_kwargs)
    else:
        # Stage arrays in a temporary file on disk, before writing to zip.

        # Import deferred for startup time improvement
        import tempfile
        # Since target file might be big enough to exceed capacity of a global
        # temporary directory, create temp file side-by-side with the target
        # file.
        file_dir, file_prefix = os.path.split(file) \
            if _is_string_like(file) else (None, 'tmp')
        fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir,
                                       suffix='-numpy.npy')
        os.close(fd)
        try:
            for key, val in namedict.items():
                fname = key + '.npy'
                fid = open(tmpfile, 'wb')
                try:
                    format.write_array(fid, np.asanyarray(val),
                                       allow_pickle=allow_pickle,
                                       pickle_kwargs=pickle_kwargs)
                    fid.close()
                    fid = None
                    zipf.write(tmpfile, arcname=fname)
                except IOError as exc:
                    raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
                finally:
                    if fid:
                        fid.close()
        finally:
            os.remove(tmpfile)

    zipf.close()

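# Name-collision sketch for the positional/keyword merge above (hypothetical
# writable file object `f` and arrays `x`, `y`): positional arrays claim the
# names 'arr_0', 'arr_1', ..., so an explicit arr_0 keyword alongside a
# positional array raises:
#
#     np.savez(f, x, arr_0=y)   # ValueError: Cannot use un-named variables
#                               # and keyword arr_0
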
def _getconv(dtype):
    """ Find the correct dtype converter. Adapted from matplotlib """

    def floatconv(x):
        # Lower-case so '0X' hex prefixes are recognized below
        x = x.lower()
        if '0x' in x:
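            # e.g. float.fromhex('0x1.91eb851eb851fp+1') == 3.14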
            return float.fromhex(x)
        return float(x)

    typ = dtype.type
    if issubclass(typ, np.bool_):
        return lambda x: bool(int(x))
    if issubclass(typ, np.uint64):
        return np.uint64
    if issubclass(typ, np.int64):
        return np.int64
    if issubclass(typ, np.integer):
        return lambda x: int(float(x))
    elif issubclass(typ, np.longdouble):
        return np.longdouble
    elif issubclass(typ, np.floating):
        return floatconv
    elif issubclass(typ, complex):
        return lambda x: complex(asstr(x))
    elif issubclass(typ, np.bytes_):
        return asbytes
    elif issubclass(typ, np.unicode_):
        return asunicode
    else:
        return asstr

# number of lines loadtxt reads in one chunk; can be overridden for testing
_loadtxt_chunksize = 50000

def loadtxt(fname, dtype=float, comments='#', delimiter=None, delimiter2=None,
            converters=None, skiprows=0, usecols=None, unpack=False,
            ndmin=0, encoding='bytes'):
    """
    Load data from a text file.

    Each row in the text file must have the same number of values.

    Parameters
    ----------
    fname : file, str, or pathlib.Path
        File, filename, or generator to read. If the filename extension is
        ``.gz`` or ``.bz2``, the file is first decompressed. Note that
        generators should return byte strings for Python 3k.
    dtype : data-type, optional
        Data-type of the resulting array; default: float. If this is a
        structured data-type, the resulting array will be 1-dimensional, and
        each row will be interpreted as an element of the array. In this
        case, the number of columns used must match the number of fields in
        the data-type.
    comments : str or sequence of str, optional
        The characters or list of characters used to indicate the start of a
        comment. For backwards compatibility, byte strings will be decoded as
        'latin1'. The default is '#'.
    delimiter : str, optional
        The string used to separate values. For backwards compatibility, byte
        strings will be decoded as 'latin1'. The default is whitespace.
    delimiter2 : str, optional
        A secondary delimiter used to split the remainder of each row after
        the first `delimiter` split, so a row laid out as
        ``B<delimiter>C<delimiter2>C<delimiter2>C`` is returned as the fields
        ``[B, C, C, C]``. Default: None (no second split).
    converters : dict, optional
        A dictionary mapping column number to a function that will convert
        that column to a float. E.g., if column 0 is a date string:
        ``converters = {0: datestr2num}``. Converters can also be used to
        provide a default value for missing data (but see also `genfromtxt`):
        ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
    skiprows : int, optional
        Skip the first `skiprows` lines; default: 0.
    usecols : int or sequence, optional
        Which columns to read, with 0 being the first. For example,
        ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.

        .. versionchanged:: 1.11.0
            When a single column has to be read it is possible to use
            an integer instead of a tuple. E.g. ``usecols = 3`` reads the
            fourth column the same way as ``usecols = (3,)`` would.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
        data-type, arrays are returned for each field. Default is False.
    ndmin : int, optional
        The returned array will have at least `ndmin` dimensions.
        Otherwise mono-dimensional axes will be squeezed.
        Legal values: 0 (default), 1 or 2.

        .. versionadded:: 1.6.0
    encoding : str, optional
        Encoding used to decode the inputfile. Does not apply to input streams.
        The special value 'bytes' enables backward compatibility workarounds
        that ensure you receive byte arrays as results if possible and pass
        latin1 encoded strings to converters. Override this value to receive
        unicode arrays and pass strings as input to converters. If set to None
        the system default is used. The default value is 'bytes'.

        .. versionadded:: 1.14.0

    Returns
    -------
    out : ndarray
        Data read from the text file.

    See Also
    --------
    load, fromstring, fromregex
    genfromtxt : Load data with missing values handled as specified.
    scipy.io.loadmat : reads MATLAB data files

    Notes
    -----
    This function aims to be a fast reader for simply formatted files. The
    `genfromtxt` function provides more sophisticated handling of, e.g.,
    lines with missing values.

    .. versionadded:: 1.10.0

    The strings produced by the Python float.hex method can be used as
    input for floats.

    Examples
    --------
    >>> from io import StringIO   # StringIO behaves like a file object
    >>> c = StringIO("0 1\\n2 3")
    >>> np.loadtxt(c)
    array([[ 0.,  1.],
           [ 2.,  3.]])

    >>> d = StringIO("M 21 72\\nF 35 58")
    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
    ...                      'formats': ('S1', 'i4', 'f4')})
    array([('M', 21, 72.0), ('F', 35, 58.0)],
          dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])

    >>> c = StringIO("1,0,2\\n3,0,4")
    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
    >>> x
    array([ 1.,  3.])
    >>> y
    array([ 2.,  4.])

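    A sketch of the non-standard ``delimiter2`` extension described above
    (an addition in this module, not part of stock NumPy)::

        c = StringIO("1:2;3;4")
        np.loadtxt(c, delimiter=':', delimiter2=';')  # -> [1., 2., 3., 4.]
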
  866. """
  867. # Type conversions for Py3 convenience
  868. if comments is not None:
  869. if isinstance(comments, (basestring, bytes)):
  870. comments = [comments]
  871. comments = [_decode_line(x) for x in comments]
  872. # Compile regex for comments beforehand
  873. comments = (re.escape(comment) for comment in comments)
  874. regex_comments = re.compile('|'.join(comments))
  875.  
  876. if delimiter is not None:
  877. delimiter = _decode_line(delimiter)
  878. if delimiter2 is not None:
  879. delimiter2 = _decode_line(delimiter2)
  880.  
  881. user_converters = converters
  882.  
  883. if encoding == 'bytes':
  884. encoding = None
  885. byte_converters = True
  886. else:
  887. byte_converters = False
  888.  
  889. if usecols is not None:
  890. # Allow usecols to be a single int or a sequence of ints
  891. try:
  892. usecols_as_list = list(usecols)
  893. except TypeError:
  894. usecols_as_list = [usecols]
  895. for col_idx in usecols_as_list:
  896. try:
  897. opindex(col_idx)
  898. except TypeError as e:
  899. e.args = (
  900. "usecols must be an int or a sequence of ints but "
  901. "it contains at least one element of type %s" %
  902. type(col_idx),
  903. )
  904. raise
  905. # Fall back to existing code
  906. usecols = usecols_as_list
  907.  
  908. fown = False
  909. try:
  910. if is_pathlib_path(fname):
  911. fname = str(fname)
  912. if _is_string_like(fname):
  913. fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
  914. fencoding = getattr(fh, 'encoding', 'latin1')
  915. fh = iter(fh)
  916. fown = True
  917. else:
  918. fh = iter(fname)
  919. fencoding = getattr(fname, 'encoding', 'latin1')
  920. except TypeError:
  921. raise ValueError('fname must be a string, file handle, or generator')
  922.  
  923. # input may be a python2 io stream
  924. if encoding is not None:
  925. fencoding = encoding
  926. # we must assume local encoding
  927. # TOOD emit portability warning?
  928. elif fencoding is None:
  929. import locale
  930. fencoding = locale.getpreferredencoding()
  931.  
  932. # not to be confused with the flatten_dtype we import...
  933. def flatten_dtype_internal(dt):
  934. """Unpack a structured data-type, and produce re-packing info."""
  935. if dt.names is None:
  936. # If the dtype is flattened, return.
  937. # If the dtype has a shape, the dtype occurs
  938. # in the list more than once.
  939. shape = dt.shape
  940. if len(shape) == 0:
  941. return ([dt.base], None)
  942. else:
  943. packing = [(shape[-1], list)]
  944. if len(shape) > 1:
  945. for dim in dt.shape[-2::-1]:
  946. packing = [(dim*packing[0][0], packing*dim)]
  947. return ([dt.base] * int(np.prod(dt.shape)), packing)
  948. else:
  949. types = []
  950. packing = []
  951. for field in dt.names:
  952. tp, bytes = dt.fields[field]
  953. flat_dt, flat_packing = flatten_dtype_internal(tp)
  954. types.extend(flat_dt)
  955. # Avoid extra nesting for subarrays
  956. if tp.ndim > 0:
  957. packing.extend(flat_packing)
  958. else:
  959. packing.append((len(flat_dt), flat_packing))
  960. return (types, packing)
  961.  
  962. def pack_items(items, packing):
  963. """Pack items into nested lists based on re-packing info."""
  964. if packing is None:
  965. return items[0]
  966. elif packing is tuple:
  967. return tuple(items)
  968. elif packing is list:
  969. return list(items)
  970. else:
  971. start = 0
  972. ret = []
  973. for length, subpacking in packing:
  974. ret.append(pack_items(items[start:start+length], subpacking))
  975. start += length
  976. return tuple(ret)
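    # e.g. with the packing info [(3, tuple)] produced below for a 3-column
    # homogeneous dtype, pack_items([1, 2, 3], [(3, tuple)]) returns
    # ((1, 2, 3),); the extra axis is removed by np.squeeze further down.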
  977.  
  978. def split_line(line):
  979. """Chop off comments, strip, and split at delimiter. """
  980. line = _decode_line(line, encoding=encoding)
  981.  
  982. if comments is not None:
  983. line = regex_comments.split(line, maxsplit=1)[0]
  984. line = line.strip('\r\n')
  985. if line:
  986. segms = line.split(delimiter)
  987. if delimiter2 is None:
  988. return segms
  989. else:
  990. segms2 = segms[1].split(delimiter2)
  991. return [segms[0]]+segms2
  992. else:
  993. return []
  994.  
  995. def read_data(chunk_size):
  996. """Parse each line, including the first.
  997.  
  998. The file read, `fh`, is a global defined above.
  999.  
  1000. Parameters
  1001. ----------
  1002. chunk_size : int
  1003. At most `chunk_size` lines are read at a time, with iteration
  1004. until all lines are read.
  1005.  
  1006. """
  1007. X = []
  1008. for i, line in enumerate(itertools.chain([first_line], fh)):
  1009. vals = split_line(line)
  1010. if len(vals) == 0:
  1011. continue
  1012. if usecols:
  1013. vals = [vals[j] for j in usecols]
  1014. if len(vals) != N:
  1015. line_num = i + skiprows + 1
  1016. raise ValueError("Wrong number of columns at line %d"
  1017. % line_num)
  1018.  
  1019. # Convert each value according to its column and store
  1020. items = [conv(val) for (conv, val) in zip(converters, vals)]
  1021.  
  1022. # Then pack it according to the dtype's nesting
  1023. items = pack_items(items, packing)
  1024. X.append(items)
  1025. if len(X) > chunk_size:
  1026. yield X
  1027. X = []
  1028. if X:
  1029. yield X
  1030.  
  1031. try:
  1032. # Make sure we're dealing with a proper dtype
  1033. dtype = np.dtype(dtype)
  1034. defconv = _getconv(dtype)
  1035.  
  1036. # Skip the first `skiprows` lines
  1037. for i in range(skiprows):
  1038. next(fh)
  1039.  
  1040. # Read until we find a line with some values, and use
  1041. # it to estimate the number of columns, N.
  1042. first_vals = None
  1043. try:
  1044. while not first_vals:
  1045. first_line = next(fh)
  1046. first_vals = split_line(first_line)
  1047. except StopIteration:
  1048. # End of lines reached
  1049. first_line = ''
  1050. first_vals = []
  1051. warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2)
  1052. N = len(usecols or first_vals)
  1053.  
  1054. dtype_types, packing = flatten_dtype_internal(dtype)
  1055. if len(dtype_types) > 1:
  1056. # We're dealing with a structured array, each field of
  1057. # the dtype matches a column
  1058. converters = [_getconv(dt) for dt in dtype_types]
  1059. else:
  1060. # All fields have the same dtype
  1061. converters = [defconv for i in range(N)]
  1062. if N > 1:
  1063. packing = [(N, tuple)]
  1064.  
  1065. # By preference, use the converters specified by the user
  1066. for i, conv in (user_converters or {}).items():
  1067. if usecols:
  1068. try:
  1069. i = usecols.index(i)
  1070. except ValueError:
  1071. # Unused converter specified
  1072. continue
  1073. if byte_converters:
  1074. # converters may use decode to workaround numpy's old behaviour,
  1075. # so encode the string again before passing to the user converter
  1076. def tobytes_first(x, conv):
  1077. if type(x) is bytes:
  1078. return conv(x)
  1079. return conv(x.encode("latin1"))
  1080. import functools
  1081. converters[i] = functools.partial(tobytes_first, conv=conv)
  1082. else:
  1083. converters[i] = conv
  1084.  
  1085. converters = [conv if conv is not bytes else
  1086. lambda x: x.encode(fencoding) for conv in converters]
  1087.  
  1088. # read data in chunks and fill it into an array via resize
  1089. # over-allocating and shrinking the array later may be faster but is
  1090. # probably not relevant compared to the cost of actually reading and
  1091. # converting the data
  1092. X = None
  1093. for x in read_data(_loadtxt_chunksize):
  1094. if X is None:
  1095. X = np.array(x, dtype)
  1096. else:
  1097. nshape = list(X.shape)
  1098. pos = nshape[0]
  1099. nshape[0] += len(x)
  1100. X.resize(nshape)
  1101. X[pos:, ...] = x
  1102. finally:
  1103. if fown:
  1104. fh.close()
  1105. # recursive closures have a cyclic reference to themselves, which
  1106. # requires gc to collect (gh-10620). To avoid this problem, for
  1107. # performance and PyPy friendliness, we break the cycle:
  1108. flatten_dtype_internal = None
  1109. pack_items = None
  1110.  
  1111. if X is None:
  1112. X = np.array([], dtype)
  1113.  
  1114. # Multicolumn data are returned with shape (1, N, M), i.e.
  1115. # (1, 1, M) for a single row - remove the singleton dimension there
  1116. if X.ndim == 3 and X.shape[:2] == (1, 1):
  1117. X.shape = (1, -1)
  1118.  
  1119. # Verify that the array has at least dimensions `ndmin`.
  1120. # Check correctness of the values of `ndmin`
  1121. if ndmin not in [0, 1, 2]:
  1122. raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
  1123. # Tweak the size and shape of the arrays - remove extraneous dimensions
  1124. if X.ndim > ndmin:
  1125. X = np.squeeze(X)
  1126. # and ensure we have the minimum number of dimensions asked for
  1127. # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
  1128. if X.ndim < ndmin:
  1129. if ndmin == 1:
  1130. X = np.atleast_1d(X)
  1131. elif ndmin == 2:
  1132. X = np.atleast_2d(X).T
  1133.  
  1134. if unpack:
  1135. if len(dtype_types) > 1:
  1136. # For structured arrays, return an array for each field.
  1137. return [X[field] for field in dtype.names]
  1138. else:
  1139. return X.T
  1140. else:
  1141. return X
  1142.  
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
            footer='', comments='# ', encoding=None):
    """
    Save an array to a text file.

    Parameters
    ----------
    fname : filename or file handle
        If the filename ends in ``.gz``, the file is automatically saved in
        compressed gzip format. `loadtxt` understands gzipped files
        transparently.
    X : 1D or 2D array_like
        Data to be saved to a text file.
    fmt : str or sequence of strs, optional
        A single format (%10.5f), a sequence of formats, or a
        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
        case `delimiter` is ignored. For complex `X`, the legal options
        for `fmt` are:
            a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
               like `' (%s+%sj)' % (fmt, fmt)`
            b) a full string specifying every real and imaginary part, e.g.
               `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
            c) a list of specifiers, one per column - in this case, the real
               and imaginary part must have separate specifiers,
               e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
    delimiter : str, optional
        String or character separating columns.
    newline : str, optional
        String or character separating lines.

        .. versionadded:: 1.5.0
    header : str, optional
        String that will be written at the beginning of the file.

        .. versionadded:: 1.7.0
    footer : str, optional
        String that will be written at the end of the file.

        .. versionadded:: 1.7.0
    comments : str, optional
        String that will be prepended to the ``header`` and ``footer`` strings,
        to mark them as comments. Default: '# ', as expected by e.g.
        ``numpy.loadtxt``.

        .. versionadded:: 1.7.0
    encoding : {None, str}, optional
        Encoding used to encode the outputfile. Does not apply to output
        streams. If the encoding is something other than 'bytes' or 'latin1'
        you will not be able to load the file in NumPy versions < 1.14. Default
        is 'latin1'.

        .. versionadded:: 1.14.0

    See Also
    --------
    save : Save an array to a binary file in NumPy ``.npy`` format
    savez : Save several arrays into an uncompressed ``.npz`` archive
    savez_compressed : Save several arrays into a compressed ``.npz`` archive

    Notes
    -----
    Further explanation of the `fmt` parameter
    (``%[flag]width[.precision]specifier``):

    flags:
        ``-`` : left justify

        ``+`` : Forces to precede result with + or -.

        ``0`` : Left pad the number with zeros instead of space (see width).

    width:
        Minimum number of characters to be printed. The value is not truncated
        if it has more characters.

    precision:
        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
          digits.
        - For ``e, E`` and ``f`` specifiers, the number of digits to print
          after the decimal point.
        - For ``g`` and ``G``, the maximum number of significant digits.
        - For ``s``, the maximum number of characters.

    specifiers:
        ``c`` : character

        ``d`` or ``i`` : signed decimal integer

        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.

        ``f`` : decimal floating point

        ``g,G`` : use the shorter of ``e,E`` or ``f``

        ``o`` : signed octal

        ``s`` : string of characters

        ``u`` : unsigned decimal integer

        ``x,X`` : unsigned hexadecimal integer

    This explanation of ``fmt`` is not complete, for an exhaustive
    specification see [1]_.

    References
    ----------
    .. [1] `Format Specification Mini-Language
           <http://docs.python.org/library/string.html#
           format-specification-mini-language>`_, Python Documentation.

    Examples
    --------
    >>> x = y = z = np.arange(0.0, 5.0, 1.0)
    >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
    >>> np.savetxt('test.out', (x, y, z))   # x,y,z equal sized 1D arrays
    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation
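
    A sketch of the complex-`X` cases (a) and (c) above, for a hypothetical
    two-column complex array ``z``::

        np.savetxt('z.out', z, fmt='%.4e')                        # case (a)
        np.savetxt('z.out', z, fmt=['%.3e%+.3ej', '%.3e%+.3ej'])  # case (c)
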
    """

    # Py3 conversions first
    if isinstance(fmt, bytes):
        fmt = asstr(fmt)
    delimiter = asstr(delimiter)

    class WriteWrap(object):
        """Convert to unicode in py2 or to bytes on bytestream inputs.

        """
        def __init__(self, fh, encoding):
            self.fh = fh
            self.encoding = encoding
            self.do_write = self.first_write

        def close(self):
            self.fh.close()

        def write(self, v):
            self.do_write(v)

        def write_bytes(self, v):
            if isinstance(v, bytes):
                self.fh.write(v)
            else:
                self.fh.write(v.encode(self.encoding))

        def write_normal(self, v):
            self.fh.write(asunicode(v))

        def first_write(self, v):
            try:
                self.write_normal(v)
                self.write = self.write_normal
            except TypeError:
                # input is probably a bytestream
                self.write_bytes(v)
                self.write = self.write_bytes

    own_fh = False
    if is_pathlib_path(fname):
        fname = str(fname)
    if _is_string_like(fname):
        # datasource doesn't support creating a new file ...
        open(fname, 'wt').close()
        fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
        own_fh = True
        # need to convert str to unicode for text io output
        if sys.version_info[0] == 2:
            fh = WriteWrap(fh, encoding or 'latin1')
    elif hasattr(fname, 'write'):
        # wrap to handle byte output streams
        fh = WriteWrap(fname, encoding or 'latin1')
    else:
        raise ValueError('fname must be a string or file handle')

    try:
        X = np.asarray(X)

        # Handle 1-dimensional arrays
        if X.ndim == 0 or X.ndim > 2:
            raise ValueError(
                "Expected 1D or 2D array, got %dD array instead" % X.ndim)
        elif X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1

            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.descr)
        else:
            ncol = X.shape[1]

        iscomplex_X = np.iscomplexobj(X)
        # `fmt` can be a string with multiple insertion points or a
        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
        if type(fmt) in (list, tuple):
            if len(fmt) != ncol:
                raise AttributeError('fmt has wrong shape. %s' % str(fmt))
            format = asstr(delimiter).join(map(asstr, fmt))
        elif isinstance(fmt, str):
            n_fmt_chars = fmt.count('%')
            error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
            if n_fmt_chars == 1:
                if iscomplex_X:
                    fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
                else:
                    fmt = [fmt, ] * ncol
                format = delimiter.join(fmt)
            elif iscomplex_X and n_fmt_chars != (2 * ncol):
                raise error
            elif ((not iscomplex_X) and n_fmt_chars != ncol):
                raise error
            else:
                format = fmt
        else:
            raise ValueError('invalid fmt: %r' % (fmt,))

        if len(header) > 0:
            header = header.replace('\n', '\n' + comments)
            fh.write(comments + header + newline)
        if iscomplex_X:
            for row in X:
                row2 = []
                for number in row:
                    row2.append(number.real)
                    row2.append(number.imag)
                fh.write(format % tuple(row2) + newline)
        else:
            for row in X:
                try:
                    v = format % tuple(row) + newline
                except TypeError:
                    raise TypeError("Mismatch between array dtype ('%s') and "
                                    "format specifier ('%s')"
                                    % (str(X.dtype), format))
                fh.write(v)

        if len(footer) > 0:
            footer = footer.replace('\n', '\n' + comments)
            fh.write(comments + footer + newline)
    finally:
        if own_fh:
            fh.close()

def fromregex(file, regexp, dtype, encoding=None):
    """
    Construct an array from a text file, using regular expression parsing.

    The returned array is always a structured array, and is constructed from
    all matches of the regular expression in the file. Groups in the regular
    expression are converted to fields of the structured array.

    Parameters
    ----------
    file : str or file
        File name or file object to read.
    regexp : str or regexp
        Regular expression used to parse the file.
        Groups in the regular expression correspond to fields in the dtype.
    dtype : dtype or list of dtypes
        Dtype for the structured array.
    encoding : str, optional
        Encoding used to decode the inputfile. Does not apply to input streams.

        .. versionadded:: 1.14.0

    Returns
    -------
    output : ndarray
        The output array, containing the part of the content of `file` that
        was matched by `regexp`. `output` is always a structured array.

    Raises
    ------
    TypeError
        When `dtype` is not a valid dtype for a structured array.

    See Also
    --------
    fromstring, loadtxt

    Notes
    -----
    Dtypes for structured arrays can be specified in several forms, but all
    forms specify at least the data type and field name. For details see
    `doc.structured_arrays`.

    Examples
    --------
    >>> f = open('test.dat', 'w')
    >>> f.write("1312 foo\\n1534 bar\\n444 qux")
    >>> f.close()

    >>> regexp = r"(\\d+)\\s+(...)"  # match [digits, whitespace, anything]
    >>> output = np.fromregex('test.dat', regexp,
    ...                       [('num', np.int64), ('key', 'S3')])
    >>> output
    array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
          dtype=[('num', '<i8'), ('key', '|S3')])
    >>> output['num']
    array([1312, 1534, 444], dtype=int64)

    """
    own_fh = False
    if not hasattr(file, "read"):
        file = np.lib._datasource.open(file, 'rt', encoding=encoding)
        own_fh = True

    try:
        if not isinstance(dtype, np.dtype):
            dtype = np.dtype(dtype)

        content = file.read()
        if isinstance(content, bytes) and not isinstance(regexp, bytes):
            regexp = asbytes(regexp)
        elif not isinstance(content, bytes) and isinstance(regexp, bytes):
            regexp = asstr(regexp)

        if not hasattr(regexp, 'match'):
            regexp = re.compile(regexp)
        seq = regexp.findall(content)
        if seq and not isinstance(seq[0], tuple):
            # Only one group is in the regexp.
            # Create the new array as a single data-type and then
            # re-interpret as a single-field structured array.
            newdtype = np.dtype(dtype[dtype.names[0]])
            output = np.array(seq, dtype=newdtype)
            output.dtype = dtype
        else:
            output = np.array(seq, dtype=dtype)

        return output
    finally:
        if own_fh:
            file.close()

  1481. #####--------------------------------------------------------------------------
  1482. #---- --- ASCII functions ---
  1483. #####--------------------------------------------------------------------------
  1484.  
  1485. def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
  1486. skip_header=0, skip_footer=0, converters=None,
  1487. missing_values=None, filling_values=None, usecols=None,
  1488. names=None, excludelist=None, deletechars=None,
  1489. replace_space='_', autostrip=False, case_sensitive=True,
  1490. defaultfmt="f%i", unpack=None, usemask=False, loose=True,
  1491. invalid_raise=True, max_rows=None, encoding='bytes'):
  1492. """
  1493. Load data from a text file, with missing values handled as specified.
  1494.  
  1495. Each line past the first `skip_header` lines is split at the `delimiter`
  1496. character, and characters following the `comments` character are discarded.
  1497.  
  1498. Parameters
  1499. ----------
  1500. fname : file, str, pathlib.Path, list of str, generator
  1501. File, filename, list, or generator to read. If the filename
  1502. extension is `.gz` or `.bz2`, the file is first decompressed. Note
  1503. that generators must return byte strings in Python 3k. The strings
  1504. in a list or produced by a generator are treated as lines.
  1505. dtype : dtype, optional
  1506. Data type of the resulting array.
  1507. If None, the dtypes will be determined by the contents of each
  1508. column, individually.
  1509. comments : str, optional
  1510. The character used to indicate the start of a comment.
  1511. All the characters occurring on a line after a comment are discarded
  1512. delimiter : str, int, or sequence, optional
  1513. The string used to separate values. By default, any consecutive
  1514. whitespaces act as delimiter. An integer or sequence of integers
  1515. can also be provided as width(s) of each field.
  1516. skiprows : int, optional
  1517. `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
  1518. skip_header : int, optional
  1519. The number of lines to skip at the beginning of the file.
  1520. skip_footer : int, optional
  1521. The number of lines to skip at the end of the file.
  1522. converters : variable, optional
  1523. The set of functions that convert the data of a column to a value.
  1524. The converters can also be used to provide a default value
  1525. for missing data: ``converters = {3: lambda s: float(s or 0)}``.
  1526. missing : variable, optional
  1527. `missing` was removed in numpy 1.10. Please use `missing_values`
  1528. instead.
  1529. missing_values : variable, optional
  1530. The set of strings corresponding to missing data.
  1531. filling_values : variable, optional
  1532. The set of values to be used as default when the data are missing.
  1533. usecols : sequence, optional
  1534. Which columns to read, with 0 being the first. For example,
  1535. ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
  1536. names : {None, True, str, sequence}, optional
  1537. If `names` is True, the field names are read from the first line after
  1538. the first `skip_header` lines. This line can optionally be proceeded
  1539. by a comment delimeter. If `names` is a sequence or a single-string of
  1540. comma-separated names, the names will be used to define the field names
  1541. in a structured dtype. If `names` is None, the names of the dtype
  1542. fields will be used, if any.
  1543. excludelist : sequence, optional
  1544. A list of names to exclude. This list is appended to the default list
  1545. ['return','file','print']. Excluded names are appended an underscore:
  1546. for example, `file` would become `file_`.
  1547. deletechars : str, optional
  1548. A string combining invalid characters that must be deleted from the
  1549. names.
  1550. defaultfmt : str, optional
  1551. A format used to define default field names, such as "f%i" or "f_%02i".
  1552. autostrip : bool, optional
  1553. Whether to automatically strip white spaces from the variables.
  1554. replace_space : char, optional
  1555. Character(s) used in replacement of white spaces in the variables
  1556. names. By default, use a '_'.
  1557. case_sensitive : {True, False, 'upper', 'lower'}, optional
  1558. If True, field names are case sensitive.
  1559. If False or 'upper', field names are converted to upper case.
  1560. If 'lower', field names are converted to lower case.
  1561. unpack : bool, optional
  1562. If True, the returned array is transposed, so that arguments may be
  1563. unpacked using ``x, y, z = loadtxt(...)``
  1564. usemask : bool, optional
  1565. If True, return a masked array.
  1566. If False, return a regular array.
  1567. loose : bool, optional
  1568. If True, do not raise errors for invalid values.
  1569. invalid_raise : bool, optional
  1570. If True, an exception is raised if an inconsistency is detected in the
  1571. number of columns.
  1572. If False, a warning is emitted and the offending lines are skipped.
  1573. max_rows : int, optional
  1574. The maximum number of rows to read. Must not be used with skip_footer
  1575. at the same time. If given, the value must be at least 1. Default is
  1576. to read the entire file.
  1577.  
  1578. .. versionadded:: 1.10.0
  1579. encoding : str, optional
  1580. Encoding used to decode the inputfile. Does not apply when `fname` is
  1581. a file object. The special value 'bytes' enables backward compatibility
  1582. workarounds that ensure that you receive byte arrays when possible
  1583. and passes latin1 encoded strings to converters. Override this value to
  1584. receive unicode arrays and pass strings as input to converters. If set
  1585. to None the system default is used. The default value is 'bytes'.
  1586.  
  1587. .. versionadded:: 1.14.0
  1588.  
  1589. Returns
  1590. -------
  1591. out : ndarray
  1592. Data read from the text file. If `usemask` is True, this is a
  1593. masked array.
  1594.  
  1595. See Also
  1596. --------
  1597. numpy.loadtxt : equivalent function when no data is missing.
  1598.  
  1599. Notes
  1600. -----
  1601. * When spaces are used as delimiters, or when no delimiter has been given
  1602. as input, there should not be any missing data between two fields.
  1603. * When the variables are named (either by a flexible dtype or with `names`,
  1604. there must not be any header in the file (else a ValueError
  1605. exception is raised).
  1606. * Individual values are not stripped of spaces by default.
  1607. When using a custom converter, make sure the function does remove spaces.
  1608.  
  1609. References
  1610. ----------
  1611. .. [1] NumPy User Guide, section `I/O with NumPy
  1612. <http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
  1613.  
  1614. Examples
  1615. ---------
  1616. >>> from io import StringIO
  1617. >>> import numpy as np
  1618.  
  1619. Comma delimited file with mixed dtype
  1620.  
  1621. >>> s = StringIO("1,1.3,abcde")
  1622. >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
  1623. ... ('mystring','S5')], delimiter=",")
  1624. >>> data
  1625. array((1, 1.3, 'abcde'),
  1626. dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
  1627.  
  1628. Using dtype = None
  1629.  
  1630. >>> s.seek(0) # needed for StringIO example only
  1631. >>> data = np.genfromtxt(s, dtype=None,
  1632. ... names = ['myint','myfloat','mystring'], delimiter=",")
  1633. >>> data
  1634. array((1, 1.3, 'abcde'),
  1635. dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
  1636.  
  1637. Specifying dtype and names
  1638.  
  1639. >>> s.seek(0)
  1640. >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
  1641. ... names=['myint','myfloat','mystring'], delimiter=",")
  1642. >>> data
  1643. array((1, 1.3, 'abcde'),
  1644. dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
  1645.  
  1646. An example with fixed-width columns
  1647.  
  1648. >>> s = StringIO("11.3abcde")
  1649. >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
  1650. ... delimiter=[1,3,5])
  1651. >>> data
  1652. array((1, 1.3, 'abcde'),
  1653. dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
  1654.  
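    An example showing how missing entries are filled (a sketch; the exact
    repr of the output may differ between NumPy versions). The empty field
    in the first row is treated as missing and replaced with the
    `filling_values` default:

    >>> s = StringIO("1,,3\\n4,5,6")
    >>> np.genfromtxt(s, delimiter=",", filling_values=-999)
    array([[   1., -999.,    3.],
           [   4.,    5.,    6.]])
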
  1655. """
  1656. if max_rows is not None:
  1657. if skip_footer:
  1658. raise ValueError(
  1659. "The keywords 'skip_footer' and 'max_rows' can not be "
  1660. "specified at the same time.")
  1661. if max_rows < 1:
  1662. raise ValueError("'max_rows' must be at least 1.")
  1663.  
  1664. if usemask:
  1665. from numpy.ma import MaskedArray, make_mask_descr
  1666. # Check the input dictionary of converters
  1667. user_converters = converters or {}
  1668. if not isinstance(user_converters, dict):
  1669. raise TypeError(
  1670. "The input argument 'converter' should be a valid dictionary "
  1671. "(got '%s' instead)" % type(user_converters))
  1672.  
  1673. if encoding == 'bytes':
  1674. encoding = None
  1675. byte_converters = True
  1676. else:
  1677. byte_converters = False
  1678.  
  1679. # Initialize the filehandle, the LineSplitter and the NameValidator
  1680. own_fhd = False
  1681. try:
  1682. if is_pathlib_path(fname):
  1683. fname = str(fname)
  1684. if isinstance(fname, basestring):
  1685. fhd = iter(np.lib._datasource.open(fname, 'rt', encoding=encoding))
  1686. own_fhd = True
  1687. else:
  1688. fhd = iter(fname)
  1689. except TypeError:
  1690. raise TypeError(
  1691. "fname must be a string, filehandle, list of strings, "
  1692. "or generator. Got %s instead." % type(fname))
  1693.  
  1694. split_line = LineSplitter(delimiter=delimiter, comments=comments,
  1695. autostrip=autostrip, encoding=encoding)
  1696. validate_names = NameValidator(excludelist=excludelist,
  1697. deletechars=deletechars,
  1698. case_sensitive=case_sensitive,
  1699. replace_space=replace_space)
  1700.  
  1701. # Skip the first `skip_header` rows
  1702. for i in range(skip_header):
  1703. next(fhd)
  1704.  
  1705. # Keep on until we find the first valid values
  1706. first_values = None
  1707. try:
  1708. while not first_values:
  1709. first_line = _decode_line(next(fhd), encoding)
  1710. if names is True:
  1711. if comments in first_line:
  1712. first_line = (
  1713. ''.join(first_line.split(comments)[1:]))
  1714. first_values = split_line(first_line)
  1715. except StopIteration:
  1716. # return an empty array if the datafile is empty
  1717. first_line = ''
  1718. first_values = []
  1719. warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
  1720.  
  1721. # Should we take the first values as names ?
  1722. if names is True:
  1723. fval = first_values[0].strip()
  1724. if fval in comments:
  1725. del first_values[0]
  1726.  
  1727. # Check the columns to use: make sure `usecols` is a list
  1728. if usecols is not None:
  1729. try:
  1730. usecols = [_.strip() for _ in usecols.split(",")]
  1731. except AttributeError:
  1732. try:
  1733. usecols = list(usecols)
  1734. except TypeError:
  1735. usecols = [usecols, ]
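        # An illustrative sketch (not in the original source) of the
        # normalization above: "a, c" -> ['a', 'c'] (names are resolved to
        # indices below), (1, 3) -> [1, 3], and a bare integer 2 -> [2].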
    nbcols = len(usecols or first_values)

    # Check the names and overwrite the dtype.names if needed
    if names is True:
        names = validate_names([str(_.strip()) for _ in first_values])
        first_line = ''
    elif _is_string_like(names):
        names = validate_names([_.strip() for _ in names.split(',')])
    elif names:
        names = validate_names(names)
    # Get the dtype
    if dtype is not None:
        dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
                           excludelist=excludelist,
                           deletechars=deletechars,
                           case_sensitive=case_sensitive,
                           replace_space=replace_space)
    # Make sure the names is a list (for 2.5)
    if names is not None:
        names = list(names)

    if usecols:
        for (i, current) in enumerate(usecols):
            # if usecols is a list of names, convert to a list of indices
            if _is_string_like(current):
                usecols[i] = names.index(current)
            elif current < 0:
                usecols[i] = current + len(first_values)
        # If the dtype is not None, make sure we update it
        if (dtype is not None) and (len(dtype) > nbcols):
            descr = dtype.descr
            dtype = np.dtype([descr[_] for _ in usecols])
            names = list(dtype.names)
        # If `names` is not None, update the names
        elif (names is not None) and (len(names) > nbcols):
            names = [names[_] for _ in usecols]
    elif (names is not None) and (dtype is not None):
        names = list(dtype.names)

    # Process the missing values ...............................
    # Rename missing_values for convenience
    user_missing_values = missing_values or ()
    if isinstance(user_missing_values, bytes):
        user_missing_values = user_missing_values.decode('latin1')

    # Define the list of missing_values (one column: one list)
    missing_values = [list(['']) for _ in range(nbcols)]

    # We have a dictionary: process it field by field
    if isinstance(user_missing_values, dict):
        # Loop on the items
        for (key, val) in user_missing_values.items():
            # Is the key a string ?
            if _is_string_like(key):
                try:
                    # Transform it into an integer
                    key = names.index(key)
                except ValueError:
                    # We couldn't find it: the name must have been dropped
                    continue
            # Redefine the key as needed if it's a column number
            if usecols:
                try:
                    key = usecols.index(key)
                except ValueError:
                    pass
            # Transform the value as a list of string
            if isinstance(val, (list, tuple)):
                val = [str(_) for _ in val]
            else:
                val = [str(val), ]
            # Add the value(s) to the current list of missing
            if key is None:
                # None acts as default
                for miss in missing_values:
                    miss.extend(val)
            else:
                missing_values[key].extend(val)
    # We have a sequence : each item matches a column
    elif isinstance(user_missing_values, (list, tuple)):
        for (value, entry) in zip(user_missing_values, missing_values):
            value = str(value)
            if value not in entry:
                entry.append(value)
    # We have a string : apply it to all entries
    elif isinstance(user_missing_values, basestring):
        user_value = user_missing_values.split(",")
        for entry in missing_values:
            entry.extend(user_value)
    # We have something else: apply it to all entries
    else:
        for entry in missing_values:
            entry.extend([str(user_missing_values)])
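    # An illustrative sketch of the accepted forms handled above:
    #   missing_values={0: "N/A", 'b': "???", None: "n/a"} registers "N/A"
    #   for column 0, "???" for the column named 'b', and "n/a" for every
    #   column; a sequence maps entries to columns one-to-one; a string
    #   such as "N/A,???" is split on commas and applied to all columns.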

    # Process the filling_values ...............................
    # Rename the input for convenience
    user_filling_values = filling_values
    if user_filling_values is None:
        user_filling_values = []
    # Define the default
    filling_values = [None] * nbcols
    # We have a dictionary : update each entry individually
    if isinstance(user_filling_values, dict):
        for (key, val) in user_filling_values.items():
            if _is_string_like(key):
                try:
                    # Transform it into an integer
                    key = names.index(key)
                except ValueError:
                    # We couldn't find it: the name must have been dropped
                    continue
            # Redefine the key if it's a column number and usecols is defined
            if usecols:
                try:
                    key = usecols.index(key)
                except ValueError:
                    pass
            # Add the value to the list
            filling_values[key] = val
    # We have a sequence : update on a one-to-one basis
    elif isinstance(user_filling_values, (list, tuple)):
        n = len(user_filling_values)
        if (n <= nbcols):
            filling_values[:n] = user_filling_values
        else:
            filling_values = user_filling_values[:nbcols]
    # We have something else : use it for all entries
    else:
        filling_values = [user_filling_values] * nbcols
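    # Likewise, a sketch of the forms handled above: filling_values={'b': 0}
    # fills only the column named 'b', a sequence fills columns one-to-one,
    # and a scalar is used as the default for every column.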

    # Initialize the converters ................................
    if dtype is None:
        # Note: we can't use a [...]*nbcols, as we would have 3 times the same
        # ... converter, instead of 3 different converters.
        converters = [StringConverter(None, missing_values=miss, default=fill)
                      for (miss, fill) in zip(missing_values, filling_values)]
    else:
        dtype_flat = flatten_dtype(dtype, flatten_base=True)
        # Initialize the converters
        if len(dtype_flat) > 1:
            # Flexible type : get a converter from each dtype
            zipit = zip(dtype_flat, missing_values, filling_values)
            converters = [StringConverter(dt, locked=True,
                                          missing_values=miss, default=fill)
                          for (dt, miss, fill) in zipit]
        else:
            # Set to a default converter (but w/ different missing values)
            zipit = zip(missing_values, filling_values)
            converters = [StringConverter(dtype, locked=True,
                                          missing_values=miss, default=fill)
                          for (miss, fill) in zipit]
    # Update the converters to use the user-defined ones
    uc_update = []
    for (j, conv) in user_converters.items():
        # If the converter is specified by column names, use the index instead
        if _is_string_like(j):
            try:
                j = names.index(j)
                i = j
            except ValueError:
                continue
        elif usecols:
            try:
                i = usecols.index(j)
            except ValueError:
                # Unused converter specified
                continue
        else:
            i = j
        # Find the value to test - first_line is not filtered by usecols:
        if len(first_line):
            testing_value = first_values[j]
        else:
            testing_value = None
        if conv is bytes:
            user_conv = asbytes
        elif byte_converters:
            # converters may use decode to workaround numpy's old behaviour,
            # so encode the string again before passing to the user converter
            def tobytes_first(x, conv):
                if type(x) is bytes:
                    return conv(x)
                return conv(x.encode("latin1"))
            import functools
            user_conv = functools.partial(tobytes_first, conv=conv)
        else:
            user_conv = conv
        converters[i].update(user_conv, locked=True,
                             testing_value=testing_value,
                             default=filling_values[i],
                             missing_values=missing_values[i],)
        uc_update.append((i, user_conv))
    # Make sure we have the corrected keys in user_converters...
    user_converters.update(uc_update)

    # Fixme: possible error as following variable never used.
    # miss_chars = [_.missing_values for _ in converters]

    # Initialize the output lists ...
    # ... rows
    rows = []
    append_to_rows = rows.append
    # ... masks
    if usemask:
        masks = []
        append_to_masks = masks.append
    # ... invalid
    invalid = []
    append_to_invalid = invalid.append

    # Parse each line
    for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
        values = split_line(line)
        nbvalues = len(values)
        # Skip an empty line
        if nbvalues == 0:
            continue
        if usecols:
            # Select only the columns we need
            try:
                values = [values[_] for _ in usecols]
            except IndexError:
                append_to_invalid((i + skip_header + 1, nbvalues))
                continue
        elif nbvalues != nbcols:
            append_to_invalid((i + skip_header + 1, nbvalues))
            continue
        # Store the values
        append_to_rows(tuple(values))
        if usemask:
            append_to_masks(tuple([v.strip() in m
                                   for (v, m) in zip(values,
                                                     missing_values)]))
        if len(rows) == max_rows:
            break

    if own_fhd:
        fhd.close()

    # Upgrade the converters (if needed)
    if dtype is None:
        for (i, converter) in enumerate(converters):
            current_column = [itemgetter(i)(_m) for _m in rows]
            try:
                converter.iterupgrade(current_column)
            except ConverterLockError:
                errmsg = "Converter #%i is locked and cannot be upgraded: " % i
                current_column = map(itemgetter(i), rows)
                for (j, value) in enumerate(current_column):
                    try:
                        converter.upgrade(value)
                    except (ConverterError, ValueError):
                        errmsg += "(occurred line #%i for value '%s')"
                        errmsg %= (j + 1 + skip_header, value)
                raise ConverterError(errmsg)

    # Check that we don't have invalid values
    nbinvalid = len(invalid)
    if nbinvalid > 0:
        nbrows = len(rows) + nbinvalid - skip_footer
        # Construct the error message
        template = "    Line #%%i (got %%i columns instead of %i)" % nbcols
        if skip_footer > 0:
            nbinvalid_skipped = len([_ for _ in invalid
                                     if _[0] > nbrows + skip_header])
            invalid = invalid[:nbinvalid - nbinvalid_skipped]
            skip_footer -= nbinvalid_skipped
#
#            nbrows -= skip_footer
#            errmsg = [template % (i, nb)
#                      for (i, nb) in invalid if i < nbrows]
#        else:
        errmsg = [template % (i, nb)
                  for (i, nb) in invalid]
        if len(errmsg):
            errmsg.insert(0, "Some errors were detected !")
            errmsg = "\n".join(errmsg)
            # Raise an exception ?
            if invalid_raise:
                raise ValueError(errmsg)
            # Issue a warning ?
            else:
                warnings.warn(errmsg, ConversionWarning, stacklevel=2)

    # Strip the last skip_footer data
    if skip_footer > 0:
        rows = rows[:-skip_footer]
        if usemask:
            masks = masks[:-skip_footer]

    # Convert each value according to the converter:
    # We want to modify the list in place to avoid creating a new one...
    if loose:
        rows = list(
            zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
                  for (i, conv) in enumerate(converters)]))
    else:
        rows = list(
            zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
                  for (i, conv) in enumerate(converters)]))

    # Reset the dtype
    data = rows
    if dtype is None:
        # Get the dtypes from the types of the converters
        column_types = [conv.type for conv in converters]
        # Find the columns with strings...
        strcolidx = [i for (i, v) in enumerate(column_types)
                     if v == np.unicode_]

        if byte_converters and strcolidx:
            # convert strings back to bytes for backward compatibility
            warnings.warn(
                "Reading unicode strings without specifying the encoding "
                "argument is deprecated. Set the encoding, use None for the "
                "system default.",
                np.VisibleDeprecationWarning, stacklevel=2)

            def encode_unicode_cols(row_tup):
                row = list(row_tup)
                for i in strcolidx:
                    row[i] = row[i].encode('latin1')
                return tuple(row)

            try:
                data = [encode_unicode_cols(r) for r in data]
            except UnicodeEncodeError:
                pass
            else:
                for i in strcolidx:
                    column_types[i] = np.bytes_

        # Update string types to be the right length
        sized_column_types = column_types[:]
        for i, col_type in enumerate(column_types):
            if np.issubdtype(col_type, np.character):
                n_chars = max(len(row[i]) for row in data)
                sized_column_types[i] = (col_type, n_chars)

        if names is None:
            # If the dtype is uniform (before sizing strings)
            base = set([
                c_type
                for c, c_type in zip(converters, column_types)
                if c._checked])
            if len(base) == 1:
                uniform_type, = base
                (ddtype, mdtype) = (uniform_type, bool)
            else:
                ddtype = [(defaultfmt % i, dt)
                          for (i, dt) in enumerate(sized_column_types)]
                if usemask:
                    mdtype = [(defaultfmt % i, bool)
                              for (i, dt) in enumerate(sized_column_types)]
        else:
            ddtype = list(zip(names, sized_column_types))
            mdtype = list(zip(names, [bool] * len(sized_column_types)))
        output = np.array(data, dtype=ddtype)
        if usemask:
            outputmask = np.array(masks, dtype=mdtype)
    else:
        # Overwrite the initial dtype names if needed
        if names and dtype.names:
            dtype.names = names
        # Case 1. We have a structured type
        if len(dtype_flat) > 1:
            # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
            # First, create the array using a flattened dtype:
            # [('a', int), ('b1', int), ('b2', float)]
            # Then, view the array using the specified dtype.
            if 'O' in (_.char for _ in dtype_flat):
                if has_nested_fields(dtype):
                    raise NotImplementedError(
                        "Nested fields involving objects are not supported...")
                else:
                    output = np.array(data, dtype=dtype)
            else:
                rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
                output = rows.view(dtype)
            # Now, process the rowmasks the same way
            if usemask:
                rowmasks = np.array(
                    masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
                # Construct the new dtype
                mdtype = make_mask_descr(dtype)
                outputmask = rowmasks.view(mdtype)
        # Case #2. We have a basic dtype
        else:
            # We used some user-defined converters
            if user_converters:
                ishomogeneous = True
                descr = []
                for i, ttype in enumerate([conv.type for conv in converters]):
                    # Keep the dtype of the current converter
                    if i in user_converters:
                        ishomogeneous &= (ttype == dtype.type)
                        if np.issubdtype(ttype, np.character):
                            ttype = (ttype, max(len(row[i]) for row in data))
                        descr.append(('', ttype))
                    else:
                        descr.append(('', dtype))
                # So we changed the dtype ?
                if not ishomogeneous:
                    # We have more than one field
                    if len(descr) > 1:
                        dtype = np.dtype(descr)
                    # We have only one field: drop the name if not needed.
                    else:
                        dtype = np.dtype(ttype)
            #
            output = np.array(data, dtype)
            if usemask:
                if dtype.names:
                    mdtype = [(_, bool) for _ in dtype.names]
                else:
                    mdtype = bool
                outputmask = np.array(masks, dtype=mdtype)
    # Try to take care of the missing data we missed
    names = output.dtype.names
    if usemask and names:
        for (name, conv) in zip(names, converters):
            missing_values = [conv(_) for _ in conv.missing_values
                              if _ != '']
            for mval in missing_values:
                outputmask[name] |= (output[name] == mval)
    # Construct the final array
    if usemask:
        output = output.view(MaskedArray)
        output._mask = outputmask
    if unpack:
        return output.squeeze().T
    return output.squeeze()
  2167.  
def ndfromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a file and return it as a single array.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function.

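    Examples
    --------
    A minimal usage sketch (the exact repr may differ between NumPy
    versions):

    >>> from io import StringIO
    >>> import numpy as np
    >>> np.ndfromtxt(StringIO("1 2\\n3 4"))
    array([[ 1.,  2.],
           [ 3.,  4.]])
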
  2180. """
  2181. kwargs['usemask'] = False
  2182. return genfromtxt(fname, **kwargs)
  2183.  
def mafromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a text file and return a masked array.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

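    Examples
    --------
    A small sketch; the trailing empty field is reported as masked (masked
    array reprs vary between NumPy versions):

    >>> from io import StringIO
    >>> import numpy as np
    >>> m = np.mafromtxt(StringIO("1,2\\n3,"), delimiter=",")
    >>> m.mask
    array([[False, False],
           [False,  True]])
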
  2196. """
  2197. kwargs['usemask'] = True
  2198. return genfromtxt(fname, **kwargs)
  2199.  
def recfromtxt(fname, **kwargs):
    """
    Load ASCII data from a file and return it in a record array.

    If ``usemask=False`` a standard `recarray` is returned,
    if ``usemask=True`` a MaskedRecords array is returned.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the
    output array will be determined from the data.

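    Examples
    --------
    A usage sketch with field names read from the first line (output repr
    is indicative only):

    >>> from io import StringIO
    >>> import numpy as np
    >>> r = np.recfromtxt(StringIO("x y\\n1 2\\n3 4"), names=True)
    >>> r.x
    array([1, 3])
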
  2220. """
  2221. kwargs.setdefault("dtype", None)
  2222. usemask = kwargs.get('usemask', False)
  2223. output = genfromtxt(fname, **kwargs)
  2224. if usemask:
  2225. from numpy.ma.mrecords import MaskedRecords
  2226. output = output.view(MaskedRecords)
  2227. else:
  2228. output = output.view(np.recarray)
  2229. return output
  2230.  
def recfromcsv(fname, **kwargs):
    """
    Load ASCII data stored in a comma-separated file.

    The returned array is a record array (if ``usemask=False``, see
    `recarray`) or a masked record array (if ``usemask=True``,
    see `ma.mrecords.MaskedRecords`).

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the
    output array will be determined from the data.

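    Examples
    --------
    A usage sketch; with the ``case_sensitive="lower"`` default the header
    names come back lower-cased (output repr is indicative only):

    >>> from io import StringIO
    >>> import numpy as np
    >>> r = np.recfromcsv(StringIO("A,B\\n1,2\\n3,4"))
    >>> r.a
    array([1, 3])
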
  2252. """
  2253. # Set default kwargs for genfromtxt as relevant to csv import.
  2254. kwargs.setdefault("case_sensitive", "lower")
  2255. kwargs.setdefault("names", True)
  2256. kwargs.setdefault("delimiter", ",")
  2257. kwargs.setdefault("dtype", None)
  2258. output = genfromtxt(fname, **kwargs)
  2259.  
  2260. usemask = kwargs.get("usemask", False)
  2261. if usemask:
  2262. from numpy.ma.mrecords import MaskedRecords
  2263. output = output.view(MaskedRecords)
  2264. else:
  2265. output = output.view(np.recarray)
  2266. return output
