__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
           'stack', 'vstack']

import functools
import itertools
import operator
import warnings

from . import numeric as _nx
from . import overrides
from .multiarray import array, asanyarray, normalize_axis_index
from . import fromnumeric as _from_nx


array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')
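
# Each public function below is registered with ``array_function_dispatch``
# (the partial defined above); its ``_*_dispatcher`` companion simply returns
# the array-like arguments that should participate in ``__array_function__``
# dispatch.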


def _atleast_1d_dispatcher(*arys):
    return arys


@array_function_dispatch(_atleast_1d_dispatcher)
def atleast_1d(*arys):
    """
    Convert inputs to arrays with at least one dimension.

    Scalar inputs are converted to 1-dimensional arrays, whilst
    higher-dimensional inputs are preserved.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more input arrays.

    Returns
    -------
    ret : ndarray
        An array, or list of arrays, each with ``a.ndim >= 1``.
        Copies are made only if necessary.

    See Also
    --------
    atleast_2d, atleast_3d

    Examples
    --------
    >>> np.atleast_1d(1.0)
    array([1.])

    >>> x = np.arange(9.0).reshape(3,3)
    >>> np.atleast_1d(x)
    array([[0., 1., 2.],
           [3., 4., 5.],
           [6., 7., 8.]])
    >>> np.atleast_1d(x) is x
    True

    >>> np.atleast_1d(1, [3, 4])
    [array([1]), array([3, 4])]

    """
    res = []
    for ary in arys:
        ary = asanyarray(ary)
        if ary.ndim == 0:
            result = ary.reshape(1)
        else:
            result = ary
        res.append(result)
    if len(res) == 1:
        return res[0]
    else:
        return res


def _atleast_2d_dispatcher(*arys):
    return arys


@array_function_dispatch(_atleast_2d_dispatcher)
def atleast_2d(*arys):
    """
    View inputs as arrays with at least two dimensions.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like sequences. Non-array inputs are converted
        to arrays. Arrays that already have two or more dimensions are
        preserved.

    Returns
    -------
    res, res2, ... : ndarray
        An array, or list of arrays, each with ``a.ndim >= 2``.
        Copies are avoided where possible, and views with two or more
        dimensions are returned.

    See Also
    --------
    atleast_1d, atleast_3d

    Examples
    --------
    >>> np.atleast_2d(3.0)
    array([[3.]])

    >>> x = np.arange(3.0)
    >>> np.atleast_2d(x)
    array([[0., 1., 2.]])
    >>> np.atleast_2d(x).base is x
    True

    >>> np.atleast_2d(1, [1, 2], [[1, 2]])
    [array([[1]]), array([[1, 2]]), array([[1, 2]])]

    """
    res = []
    for ary in arys:
        ary = asanyarray(ary)
        if ary.ndim == 0:
            result = ary.reshape(1, 1)
        elif ary.ndim == 1:
            result = ary[_nx.newaxis, :]
        else:
            result = ary
        res.append(result)
    if len(res) == 1:
        return res[0]
    else:
        return res


def _atleast_3d_dispatcher(*arys):
    return arys


@array_function_dispatch(_atleast_3d_dispatcher)
def atleast_3d(*arys):
    """
    View inputs as arrays with at least three dimensions.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like sequences. Non-array inputs are converted to
        arrays. Arrays that already have three or more dimensions are
        preserved.

    Returns
    -------
    res1, res2, ... : ndarray
        An array, or list of arrays, each with ``a.ndim >= 3``. Copies are
        avoided where possible, and views with three or more dimensions are
        returned. For example, a 1-D array of shape ``(N,)`` becomes a view
        of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
        view of shape ``(M, N, 1)``.

    See Also
    --------
    atleast_1d, atleast_2d

    Examples
    --------
    >>> np.atleast_3d(3.0)
    array([[[3.]]])

    >>> x = np.arange(3.0)
    >>> np.atleast_3d(x).shape
    (1, 3, 1)

    >>> x = np.arange(12.0).reshape(4,3)
    >>> np.atleast_3d(x).shape
    (4, 3, 1)
    >>> np.atleast_3d(x).base is x.base  # x is a reshape, so not base itself
    True

    >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
    ...     print(arr, arr.shape) # doctest: +SKIP
    ...
    [[[1]
      [2]]] (1, 2, 1)
    [[[1]
      [2]]] (1, 2, 1)
    [[[1 2]]] (1, 1, 2)

    """
    res = []
    for ary in arys:
        ary = asanyarray(ary)
        if ary.ndim == 0:
            result = ary.reshape(1, 1, 1)
        elif ary.ndim == 1:
            result = ary[_nx.newaxis, :, _nx.newaxis]
        elif ary.ndim == 2:
            result = ary[:, :, _nx.newaxis]
        else:
            result = ary
        res.append(result)
    if len(res) == 1:
        return res[0]
    else:
        return res


def _arrays_for_stack_dispatcher(arrays, stacklevel=4):
    if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
        warnings.warn('arrays to stack must be passed as a "sequence" type '
                      'such as list or tuple. Support for non-sequence '
                      'iterables such as generators is deprecated as of '
                      'NumPy 1.16 and will raise an error in the future.',
                      FutureWarning, stacklevel=stacklevel)
        return ()
    return arrays
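
# For example, a call such as ``np.vstack(a for a in arrays)`` reaches this
# helper with a generator (no ``__getitem__``), emits the FutureWarning above,
# and returns an empty tuple so that no array-like arguments take part in
# ``__array_function__`` dispatch.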


def _vhstack_dispatcher(tup):
    return _arrays_for_stack_dispatcher(tup)


@array_function_dispatch(_vhstack_dispatcher)
def vstack(tup):
    """
    Stack arrays in sequence vertically (row wise).

    This is equivalent to concatenation along the first axis after 1-D arrays
    of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
    `vsplit`.

    This function makes most sense for arrays with up to 3 dimensions. For
    instance, for pixel-data with a height (first axis), width (second axis),
    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
    `block` provide more general stacking and concatenation operations.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the first axis.
        1-D arrays must have the same length.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays; it will be at least
        2-D.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    block : Assemble an nd-array from nested lists of blocks.
    hstack : Stack arrays in sequence horizontally (column wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    vsplit : Split an array into multiple sub-arrays vertically (row-wise).

    Examples
    --------
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.vstack((a,b))
    array([[1, 2, 3],
           [4, 5, 6]])

    >>> a = np.array([[1], [2], [3]])
    >>> b = np.array([[4], [5], [6]])
    >>> np.vstack((a,b))
    array([[1],
           [2],
           [3],
           [4],
           [5],
           [6]])

    """
    if not overrides.ARRAY_FUNCTION_ENABLED:
        # raise warning if necessary
        _arrays_for_stack_dispatcher(tup, stacklevel=2)
    arrs = atleast_2d(*tup)
    if not isinstance(arrs, list):
        arrs = [arrs]
    return _nx.concatenate(arrs, 0)


@array_function_dispatch(_vhstack_dispatcher)
def hstack(tup):
    """
    Stack arrays in sequence horizontally (column wise).

    This is equivalent to concatenation along the second axis, except for 1-D
    arrays where it concatenates along the first axis. Rebuilds arrays divided
    by `hsplit`.

    This function makes most sense for arrays with up to 3 dimensions. For
    instance, for pixel-data with a height (first axis), width (second axis),
    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
    `block` provide more general stacking and concatenation operations.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the second axis,
        except 1-D arrays which can be any length.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    block : Assemble an nd-array from nested lists of blocks.
    vstack : Stack arrays in sequence vertically (row wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    hsplit : Split an array into multiple sub-arrays horizontally (column-wise).

    Examples
    --------
    >>> a = np.array((1,2,3))
    >>> b = np.array((4,5,6))
    >>> np.hstack((a,b))
    array([1, 2, 3, 4, 5, 6])
    >>> a = np.array([[1],[2],[3]])
    >>> b = np.array([[4],[5],[6]])
    >>> np.hstack((a,b))
    array([[1, 4],
           [2, 5],
           [3, 6]])

    """
    if not overrides.ARRAY_FUNCTION_ENABLED:
        # raise warning if necessary
        _arrays_for_stack_dispatcher(tup, stacklevel=2)

    arrs = atleast_1d(*tup)
    if not isinstance(arrs, list):
        arrs = [arrs]
    # As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
    if arrs and arrs[0].ndim == 1:
        return _nx.concatenate(arrs, 0)
    else:
        return _nx.concatenate(arrs, 1)


def _stack_dispatcher(arrays, axis=None, out=None):
    arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
    if out is not None:
        # optimize for the typical case where only arrays is provided
        arrays = list(arrays)
        arrays.append(out)
    return arrays


@array_function_dispatch(_stack_dispatcher)
def stack(arrays, axis=0, out=None):
    """
    Join a sequence of arrays along a new axis.

    The ``axis`` parameter specifies the index of the new axis in the
    dimensions of the result. For example, if ``axis=0`` it will be the first
    dimension and if ``axis=-1`` it will be the last dimension.

    .. versionadded:: 1.10.0

    Parameters
    ----------
    arrays : sequence of array_like
        Each array must have the same shape.
    axis : int, optional
        The axis in the result array along which the input arrays are stacked.
    out : ndarray, optional
        If provided, the destination to place the result. The shape must be
        correct, matching that of what stack would have returned if no
        out argument were specified.

    Returns
    -------
    stacked : ndarray
        The stacked array has one more dimension than the input arrays.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    block : Assemble an nd-array from nested lists of blocks.
    split : Split array into a list of multiple sub-arrays of equal size.

    Examples
    --------
    >>> arrays = [np.random.randn(3, 4) for _ in range(10)]
    >>> np.stack(arrays, axis=0).shape
    (10, 3, 4)

    >>> np.stack(arrays, axis=1).shape
    (3, 10, 4)

    >>> np.stack(arrays, axis=2).shape
    (3, 4, 10)

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.stack((a, b))
    array([[1, 2, 3],
           [4, 5, 6]])

    >>> np.stack((a, b), axis=-1)
    array([[1, 4],
           [2, 5],
           [3, 6]])

    """
    if not overrides.ARRAY_FUNCTION_ENABLED:
        # raise warning if necessary
        _arrays_for_stack_dispatcher(arrays, stacklevel=2)

    arrays = [asanyarray(arr) for arr in arrays]
    if not arrays:
        raise ValueError('need at least one array to stack')

    shapes = {arr.shape for arr in arrays}
    if len(shapes) != 1:
        raise ValueError('all input arrays must have the same shape')

    result_ndim = arrays[0].ndim + 1
    axis = normalize_axis_index(axis, result_ndim)

    sl = (slice(None),) * axis + (_nx.newaxis,)
    expanded_arrays = [arr[sl] for arr in arrays]
    return _nx.concatenate(expanded_arrays, axis=axis, out=out)
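
# Note on the expansion step above: indexing with
# ``(slice(None),) * axis + (np.newaxis,)`` inserts the new axis at position
# ``axis`` of every input, so the final concatenate along that axis produces
# the stacked result. A small hand-worked sketch (values chosen purely for
# illustration):
#
#     >>> a = np.array([1, 2, 3])                        # shape (3,)
#     >>> a[(slice(None),) * 1 + (np.newaxis,)].shape    # axis=1
#     (3, 1)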


# Internal functions to eliminate the overhead of repeated dispatch in one of
# the two possible paths inside np.block.
# Use getattr to protect against __array_function__ being disabled.
_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
_concatenate = getattr(_from_nx.concatenate, '__wrapped__',
                       _from_nx.concatenate)


def _block_format_index(index):
    """
    Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
    """
    idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
    return 'arrays' + idx_str


def _block_check_depths_match(arrays, parent_index=[]):
    """
    Recursive function checking that the depths of nested lists in `arrays`
    all match. Mismatch raises a ValueError as described in the block
    docstring below.

    The entire index (rather than just the depth) needs to be calculated
    for each innermost list, in case an error needs to be raised, so that
    the index of the offending list can be printed as part of the error.

    Parameters
    ----------
    arrays : nested list of arrays
        The arrays to check
    parent_index : list of int
        The full index of `arrays` within the nested lists passed to
        `_block_check_depths_match` at the top of the recursion.

    Returns
    -------
    first_index : list of int
        The full index of an element from the bottom of the nesting in
        `arrays`. If any element at the bottom is an empty list, this will
        refer to it, and the last index along the empty axis will be None.
    max_arr_ndim : int
        The maximum of the ndims of the arrays nested in `arrays`.
    final_size : int
        The number of elements in the final array. This is used to motivate
        the choice of algorithm, based on benchmarking wisdom.
    """
    if type(arrays) is tuple:
        # not strictly necessary, but saves us from:
        #  - more than one way to do things - no point treating tuples like
        #    lists
        #  - horribly confusing behaviour that results when tuples are
        #    treated like ndarray
        raise TypeError(
            '{} is a tuple. '
            'Only lists can be used to arrange blocks, and np.block does '
            'not allow implicit conversion from tuple to ndarray.'.format(
                _block_format_index(parent_index)
            )
        )
    elif type(arrays) is list and len(arrays) > 0:
        idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
                      for i, arr in enumerate(arrays))

        first_index, max_arr_ndim, final_size = next(idxs_ndims)
        for index, ndim, size in idxs_ndims:
            final_size += size
            if ndim > max_arr_ndim:
                max_arr_ndim = ndim
            if len(index) != len(first_index):
                raise ValueError(
                    "List depths are mismatched. First element was at depth "
                    "{}, but there is an element at depth {} ({})".format(
                        len(first_index),
                        len(index),
                        _block_format_index(index)
                    )
                )
            # propagate our flag that indicates an empty list at the bottom
            if index[-1] is None:
                first_index = index

        return first_index, max_arr_ndim, final_size
    elif type(arrays) is list and len(arrays) == 0:
        # We've 'bottomed out' on an empty list
        return parent_index + [None], 0, 0
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        size = _size(arrays)
        return parent_index, _ndim(arrays), size
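
# Hand-worked illustration of the helper above (values derived by tracing the
# code, not an official doctest):
#
#     _block_check_depths_match([[1], [2, 3]])  ->  ([0, 0], 0, 3)
#         (deepest index found, max ndim of the leaves, total element count)
#
# whereas a mismatched nesting such as ``[[1], 2]`` raises ValueError,
# because ``2`` sits at depth 1 while the first leaf was found at depth 2.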


def _atleast_nd(a, ndim):
    # Ensures `a` has at least `ndim` dimensions by prepending
    # ones to `a.shape` as necessary
    return array(a, ndmin=ndim, copy=False, subok=True)


def _accumulate(values):
    return list(itertools.accumulate(values))


def _concatenate_shapes(shapes, axis):
    """Given array shapes, return the resulting shape and slice prefixes.

    These help in nested concatenation.

    Returns
    -------
    shape : tuple of int
        This tuple satisfies:
        ```
        shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
        shape == concatenate(arrs, axis).shape
        ```

    slice_prefixes : tuple of (slice(start, end), )
        For a list of arrays being concatenated, this returns the slice
        in the larger array at axis that needs to be sliced into.

        For example, the following holds:
        ```
        ret = concatenate([a, b, c], axis)
        _, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis)

        ret[(slice(None),) * axis + sl_a] == a
        ret[(slice(None),) * axis + sl_b] == b
        ret[(slice(None),) * axis + sl_c] == c
        ```

        These are called slice prefixes since they are used in the recursive
        blocking algorithm to compute the left-most slices during the
        recursion. Therefore, they must be prepended to the rest of the slice
        that was computed deeper in the recursion.

        These are returned as tuples to ensure that they can quickly be
        added to existing slice tuples without creating a new tuple every
        time.

    """
    # Cache a result that will be reused.
    shape_at_axis = [shape[axis] for shape in shapes]

    # Take a shape, any shape
    first_shape = shapes[0]
    first_shape_pre = first_shape[:axis]
    first_shape_post = first_shape[axis+1:]

    if any(shape[:axis] != first_shape_pre or
           shape[axis+1:] != first_shape_post for shape in shapes):
        raise ValueError(
            'Mismatched array shapes in block along axis {}.'.format(axis))

    shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])

    offsets_at_axis = _accumulate(shape_at_axis)
    slice_prefixes = [(slice(start, end),)
                      for start, end in zip([0] + offsets_at_axis,
                                            offsets_at_axis)]
    return shape, slice_prefixes
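
# Hand-worked illustration of the helper above (values derived by tracing the
# code, not an official doctest):
#
#     _concatenate_shapes([(2, 3), (2, 4)], axis=1)
#         -> ((2, 7), [(slice(0, 3),), (slice(3, 7),)])
#
# i.e. a (2, 3) block followed by a (2, 4) block along axis 1 gives a (2, 7)
# result, and each slice prefix picks out the columns that the corresponding
# block occupies.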


def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
    """
    Returns the shape of the final array, along with a list
    of slices and a list of arrays that can be used for assignment inside the
    new array

    Parameters
    ----------
    arrays : nested list of arrays
        The arrays to check
    max_depth : int
        The number of nested lists
    result_ndim : int
        The number of dimensions in the final array.

    Returns
    -------
    shape : tuple of int
        The shape that the final array will take on.
    slices : list of tuple of slices
        The slices into the full array required for assignment. These are
        required to be prepended with ``(Ellipsis, )`` to obtain the correct
        final index.
    arrays : list of ndarray
        The data to assign to each slice of the full array

    """
    if depth < max_depth:
        shapes, slices, arrays = zip(
            *[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
              for arr in arrays])

        axis = result_ndim - max_depth + depth
        shape, slice_prefixes = _concatenate_shapes(shapes, axis)

        # Prepend the slice prefix and flatten the slices
        slices = [slice_prefix + the_slice
                  for slice_prefix, inner_slices in zip(slice_prefixes, slices)
                  for the_slice in inner_slices]

        # Flatten the array list
        arrays = functools.reduce(operator.add, arrays)

        return shape, slices, arrays
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        # type(arrays) is not list
        # Return the slice and the array inside a list to be consistent with
        # the recursive case.
        arr = _atleast_nd(arrays, result_ndim)
        return arr.shape, [()], [arr]
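
# Hand-worked illustration of the recursion above (derived by tracing the
# code, not an official doctest): for two (2, 2) blocks ``a`` and ``b`` placed
# side by side,
#
#     shape, slices, arrs = _block_info_recursion([[a, b]], max_depth=2,
#                                                 result_ndim=2)
#
# yields ``shape == (2, 4)`` and two slice tuples selecting rows 0:2 with
# columns 0:2 and 2:4 respectively, so ``a`` and ``b`` can be assigned
# straight into a preallocated (2, 4) result.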


def _block(arrays, max_depth, result_ndim, depth=0):
    """
    Internal implementation of block based on repeated concatenation.
    `arrays` is the argument passed to
    block. `max_depth` is the depth of nested lists within `arrays` and
    `result_ndim` is the greatest of the dimensions of the arrays in
    `arrays` and the depth of the lists in `arrays` (see block docstring
    for details).
    """
    if depth < max_depth:
        arrs = [_block(arr, max_depth, result_ndim, depth+1)
                for arr in arrays]
        return _concatenate(arrs, axis=-(max_depth-depth))
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        # type(arrays) is not list
        return _atleast_nd(arrays, result_ndim)
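
# Sketch of how the recursion above maps nesting depth to concatenation axes
# (hypothetical 2-D blocks A, B, C, D):
#
#     _block([[A, B], [C, D]], max_depth=2, result_ndim=2)
#       innermost lists (depth 1) are concatenated along axis -1 (columns),
#       the outer list (depth 0) is then concatenated along axis -2 (rows),
#
# which is exactly the "last axis first, then second-last" order described in
# the ``block`` docstring below.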


def _block_dispatcher(arrays):
    # Use type(...) is list to match the behavior of np.block(), which special
    # cases list specifically rather than allowing for generic iterables or
    # tuple. Also, we know that list.__array_function__ will never exist.
    if type(arrays) is list:
        for subarrays in arrays:
            yield from _block_dispatcher(subarrays)
    else:
        yield arrays


@array_function_dispatch(_block_dispatcher)
def block(arrays):
    """
    Assemble an nd-array from nested lists of blocks.

    Blocks in the innermost lists are concatenated (see `concatenate`) along
    the last dimension (-1), then these are concatenated along the
    second-last dimension (-2), and so on until the outermost list is reached.

    Blocks can be of any dimension, but will not be broadcasted using the
    normal rules. Instead, leading axes of size 1 are inserted, to make
    ``block.ndim`` the same for all blocks. This is primarily useful for
    working with scalars, and means that code like ``np.block([v, 1])`` is
    valid, where ``v.ndim == 1``.

    When the nested list is two levels deep, this allows block matrices to be
    constructed from their components.

    .. versionadded:: 1.13.0

    Parameters
    ----------
    arrays : nested list of array_like or scalars (but not tuples)
        If passed a single ndarray or scalar (a nested list of depth 0), this
        is returned unmodified (and not copied).

        Element shapes must match along the appropriate axes (without
        broadcasting), but leading 1s will be prepended to the shape as
        necessary to make the dimensions match.

    Returns
    -------
    block_array : ndarray
        The array assembled from the given blocks.

        The dimensionality of the output is equal to the greatest of:
        * the dimensionality of all the inputs
        * the depth to which the input list is nested

    Raises
    ------
    ValueError
        * If list depths are mismatched - for instance, ``[[a, b], c]`` is
          illegal, and should be spelt ``[[a, b], [c]]``
        * If lists are empty - for instance, ``[[a, b], []]``

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    vstack : Stack arrays in sequence vertically (row wise).
    hstack : Stack arrays in sequence horizontally (column wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    vsplit : Split an array into multiple sub-arrays vertically (row-wise).

    Notes
    -----
    When called with only scalars, ``np.block`` is equivalent to an ndarray
    call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
    ``np.array([[1, 2], [3, 4]])``.

    This function does not enforce that the blocks lie on a fixed grid.
    ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form::

        AAAbb
        AAAbb
        cccDD

    But is also allowed to produce, for some ``a, b, c, d``::

        AAAbb
        AAAbb
        cDDDD

    Since concatenation happens along the last axis first, `block` is _not_
    capable of producing the following directly::

        AAAbb
        cccbb
        cccDD

    Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
    equivalent to ``np.block([[A, B, ...], [p, q, ...]])``.

    Examples
    --------
    The most common use of this function is to build a block matrix

    >>> A = np.eye(2) * 2
    >>> B = np.eye(3) * 3
    >>> np.block([
    ...     [A,               np.zeros((2, 3))],
    ...     [np.ones((3, 2)), B               ]
    ... ])
    array([[2., 0., 0., 0., 0.],
           [0., 2., 0., 0., 0.],
           [1., 1., 3., 0., 0.],
           [1., 1., 0., 3., 0.],
           [1., 1., 0., 0., 3.]])

    With a list of depth 1, `block` can be used as `hstack`

    >>> np.block([1, 2, 3])              # hstack([1, 2, 3])
    array([1, 2, 3])

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.block([a, b, 10])             # hstack([a, b, 10])
    array([ 1,  2,  3,  4,  5,  6, 10])

    >>> A = np.ones((2, 2), int)
    >>> B = 2 * A
    >>> np.block([A, B])                 # hstack([A, B])
    array([[1, 1, 2, 2],
           [1, 1, 2, 2]])

    With a list of depth 2, `block` can be used in place of `vstack`:

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.block([[a], [b]])             # vstack([a, b])
    array([[1, 2, 3],
           [4, 5, 6]])

    >>> A = np.ones((2, 2), int)
    >>> B = 2 * A
    >>> np.block([[A], [B]])             # vstack([A, B])
    array([[1, 1],
           [1, 1],
           [2, 2],
           [2, 2]])

    It can also be used in place of `atleast_1d` and `atleast_2d`

    >>> a = np.array(0)
    >>> b = np.array([1])
    >>> np.block([a])                    # atleast_1d(a)
    array([0])
    >>> np.block([b])                    # atleast_1d(b)
    array([1])

    >>> np.block([[a]])                  # atleast_2d(a)
    array([[0]])
    >>> np.block([[b]])                  # atleast_2d(b)
    array([[1]])

    """
    arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)

    # It was found through benchmarking that making an array of final size
    # around 256x256 was faster by straight concatenation on an
    # i7-7700HQ processor with dual-channel 2400 MHz RAM.
    # It didn't seem to matter heavily on the dtype used.
    #
    # A 2D array using repeated concatenation requires 2 copies of the array.
    #
    # The fastest algorithm will depend on the ratio of CPU power to memory
    # speed.
    # One can monitor the results of the benchmark
    # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
    # to tune this parameter until a C version of the `_block_info_recursion`
    # algorithm is implemented which would likely be faster than the python
    # version.
    if list_ndim * final_size > (2 * 512 * 512):
        return _block_slicing(arrays, list_ndim, result_ndim)
    else:
        return _block_concatenate(arrays, list_ndim, result_ndim)
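
# The threshold above works out to 2 * 512 * 512 = 524288: once
# ``list_ndim * final_size`` exceeds roughly the element count of a 512x512
# array copied twice, the slicing-based assignment path is chosen instead of
# repeated concatenation.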


# These helper functions are mostly used for testing.
# They allow us to write tests that directly call `_block_slicing`
# or `_block_concatenate` without blocking large arrays to force the wisdom
# to trigger the desired path.
def _block_setup(arrays):
    """
    Returns
    (`arrays`, list_ndim, result_ndim, final_size)
    """
    bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
    list_ndim = len(bottom_index)
    if bottom_index and bottom_index[-1] is None:
        raise ValueError(
            'List at {} cannot be empty'.format(
                _block_format_index(bottom_index)
            )
        )
    result_ndim = max(arr_ndim, list_ndim)
    return arrays, list_ndim, result_ndim, final_size


def _block_slicing(arrays, list_ndim, result_ndim):
    shape, slices, arrays = _block_info_recursion(
        arrays, list_ndim, result_ndim)
    dtype = _nx.result_type(*[arr.dtype for arr in arrays])

    # Test preferring F only in the case that all input arrays are F
    F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
    C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
    order = 'F' if F_order and not C_order else 'C'

    result = _nx.empty(shape=shape, dtype=dtype, order=order)
    # Note: In a c implementation, the function
    # PyArray_CreateMultiSortedStridePerm could be used for more advanced
    # guessing of the desired order.

    for the_slice, arr in zip(slices, arrays):
        result[(Ellipsis,) + the_slice] = arr
    return result
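
# Order-selection note for the function above: the result is Fortran-ordered
# only when every block is F-contiguous and at least one of them is not also
# C-contiguous (1-D blocks are both), e.g. a set of transposed 2-D blocks;
# any mix of layouts falls back to C order.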


def _block_concatenate(arrays, list_ndim, result_ndim):
    result = _block(arrays, list_ndim, result_ndim)
    if list_ndim == 0:
        # Catch an edge case where _block returns a view because
        # `arrays` is a single numpy array and not a list of numpy arrays.
        # This might copy scalars or lists twice, but this isn't a likely
        # use case for those interested in performance
        result = result.copy()
    return result