"""
Create the numpy.core.multiarray namespace for backward compatibility. In v1.16
the multiarray and umath c-extension modules were merged into a single
_multiarray_umath extension module. So we replicate the old namespace
by importing from the extension module.
"""
import functools
import sys
import warnings
from . import overrides
from . import _multiarray_umath
import numpy as np
from numpy.core._multiarray_umath import *
from numpy.core._multiarray_umath import (
_fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string,
_ARRAY_API, _monotonicity, _get_ndarray_c_version
)
__all__ = [
'_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',
'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',
'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',
'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose',
'_flagdict', '_insert', '_reconstruct', '_vec_string', '_monotonicity',
'add_docstring', 'arange', 'array', 'bincount', 'broadcast',
'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',
'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',
'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'inner',
'int_asbuffer', 'interp', 'interp_complex', 'is_busday', 'lexsort',
'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer',
'nested_iters', 'normalize_axis_index', 'packbits',
'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar',
'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops',
'set_string_function', 'set_typeDict', 'shares_memory', 'test_interrupt',
'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot',
'where', 'zeros']
if sys.version_info.major < 3:
__all__ += ['newbuffer', 'getbuffer']
# For backward compatibility, make sure pickle imports these functions from here
_reconstruct.__module__ = 'numpy.core.multiarray'
scalar.__module__ = 'numpy.core.multiarray'
arange.__module__ = 'numpy'
array.__module__ = 'numpy'
datetime_data.__module__ = 'numpy'
empty.__module__ = 'numpy'
frombuffer.__module__ = 'numpy'
fromfile.__module__ = 'numpy'
fromiter.__module__ = 'numpy'
frompyfunc.__module__ = 'numpy'
fromstring.__module__ = 'numpy'
geterrobj.__module__ = 'numpy'
may_share_memory.__module__ = 'numpy'
nested_iters.__module__ = 'numpy'
promote_types.__module__ = 'numpy'
set_numeric_ops.__module__ = 'numpy'
seterrobj.__module__ = 'numpy'
zeros.__module__ = 'numpy'
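# Illustrative sketch (assumed usage, not executed at import): ndarray
# pickling goes through ``_reconstruct``, so with its ``__module__`` set
# above, new pickles keep recording the long-standing
# ``numpy.core.multiarray._reconstruct`` path that older readers expect::
#
#     import pickle
#     import numpy as np
#     payload = pickle.dumps(np.arange(3), protocol=2)
#     b'numpy.core.multiarray' in payload   # module path stored in the stream
#     np.array_equal(pickle.loads(payload), np.arange(3))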
# We can't verify dispatcher signatures because NumPy's C functions don't
# support introspection.
array_function_from_c_func_and_dispatcher = functools.partial(
overrides.array_function_from_dispatcher,
module='numpy', docs_from_dispatcher=True, verify=False)
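# Rough sketch of what the decorator yields (illustration only; the real
# machinery lives in numpy.core.overrides): each ``def`` below acts purely as
# a dispatcher that returns the arguments to search for an
# ``__array_function__`` override, and the public name is bound to the C
# implementation, roughly::
#
#     empty_like = overrides.array_function_dispatch(
#         dispatcher=empty_like,            # the Python def below
#         module='numpy', docs_from_dispatcher=True, verify=False,
#     )(_multiarray_umath.empty_like)       # the C implementation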
@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
def empty_like(prototype, dtype=None, order=None, subok=None, shape=None):
"""
empty_like(prototype, dtype=None, order='K', subok=True, shape=None)
Return a new array with the same shape and type as a given array.
Parameters
----------
prototype : array_like
The shape and data-type of `prototype` define these same attributes
of the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``prototype`` is Fortran
contiguous, 'C' otherwise. 'K' means match the layout of ``prototype``
as closely as possible.
.. versionadded:: 1.6.0
subok : bool, optional
If True, then the newly created array will use the sub-class
type of `prototype`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `prototype`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], # uninitialized
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
"""
return (prototype,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
def concatenate(arrays, axis=None, out=None):
"""
concatenate((a1, a2, ...), axis=0, out=None)
Join a sequence of arrays along an existing axis.
Parameters
----------
a1, a2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. If axis is None,
arrays are flattened before use. Default is 0.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if no
out argument were specified.
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
ma.concatenate : Concatenate function that preserves input masks.
array_split : Split an array into multiple sub-arrays of equal or
near-equal size.
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise)
vsplit : Split array into multiple sub-arrays vertically (row wise)
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
stack : Stack a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise)
vstack : Stack arrays in sequence vertically (row wise)
dstack : Stack arrays in sequence depth wise (along third dimension)
block : Assemble arrays from blocks.
Notes
-----
When one or more of the arrays to be concatenated is a MaskedArray,
this function will return a MaskedArray object instead of an ndarray,
but the input masks are *not* preserved. In cases where a MaskedArray
is expected as input, use the ma.concatenate function from the masked
array module instead.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.concatenate((a, b.T), axis=1)
array([[1, 2, 5],
[3, 4, 6]])
>>> np.concatenate((a, b), axis=None)
array([1, 2, 3, 4, 5, 6])
This function will not preserve masking of MaskedArray inputs.
>>> a = np.ma.arange(3)
>>> a[1] = np.ma.masked
>>> b = np.arange(2, 5)
>>> a
masked_array(data=[0, --, 2],
mask=[False, True, False],
fill_value=999999)
>>> b
array([2, 3, 4])
>>> np.concatenate([a, b])
masked_array(data=[0, 1, 2, 2, 3, 4],
mask=False,
fill_value=999999)
>>> np.ma.concatenate([a, b])
masked_array(data=[0, --, 2, 2, 3, 4],
mask=[False, True, False, False, False, False],
fill_value=999999)
"""
if out is not None:
# optimize for the typical case where only arrays is provided
arrays = list(arrays)
arrays.append(out)
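        # including `out` lets its type participate in __array_function__
        # dispatch as well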
return arrays
@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
def inner(a, b):
"""
inner(a, b)
Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : array_like
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimensions of `a` and `b` have different sizes.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14, 38, 62],
[ 86, 110, 134]])
An example where `b` is a scalar:
>>> np.inner(np.eye(2), 7)
array([[7., 0.],
[0., 7.]])
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
def where(condition, x=None, y=None):
"""
where(condition, [x, y])
Return elements chosen from `x` or `y` depending on `condition`.
.. note::
When only `condition` is provided, this function is a shorthand for
``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
preferred, as it behaves correctly for subclasses. The rest of this
documentation covers only the case where all three arguments are
provided.
Parameters
----------
condition : array_like, bool
Where True, yield `x`, otherwise yield `y`.
x, y : array_like
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
Returns
-------
out : ndarray
An array with elements from `x` where `condition` is True, and elements
from `y` elsewhere.
See Also
--------
choose
nonzero : The function that is called when x and y are omitted
Notes