"""Adds docstrings to Tensor functions"""
import torch._C
from torch._C import _add_docstr as add_docstr
from ._torch_docs import parse_kwargs
from ._torch_docs import reproducibility_notes
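

# ``add_docstr_all`` attaches ``docstr`` as the ``__doc__`` of the named method on the
# C-level ``torch._C._TensorBase``, so the documentation is exposed on every
# ``torch.Tensor`` instance.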
def add_docstr_all(method, docstr):
    add_docstr(getattr(torch._C._TensorBase, method), docstr)

common_args = parse_kwargs("""
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.preserve_format``.
""")

new_common_args = parse_kwargs("""
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.
    dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
        Default: if None, same :class:`torch.dtype` as this tensor.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if None, same :class:`torch.device` as this tensor.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, the returned tensor is allocated in
        pinned memory. Works only for CPU tensors. Default: ``False``.
""")

add_docstr_all('new_tensor',
               r"""
new_tensor(data, dtype=None, device=None, requires_grad=False) -> Tensor

Returns a new Tensor with :attr:`data` as the tensor data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

.. warning::

    :func:`new_tensor` always copies :attr:`data`. If you have a Tensor
    ``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
    or :func:`torch.Tensor.detach`.
    If you have a numpy array and want to avoid a copy, use
    :func:`torch.from_numpy`.

.. warning::

    When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed,
    and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()``
    and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
    The equivalents using ``clone()`` and ``detach()`` are recommended.

Args:
    data (array_like): The returned Tensor copies :attr:`data`.
    {dtype}
    {device}
    {requires_grad}

Example::

    >>> tensor = torch.ones((2,), dtype=torch.int8)
    >>> data = [[0, 1], [2, 3]]
    >>> tensor.new_tensor(data)
    tensor([[ 0,  1],
            [ 2,  3]], dtype=torch.int8)
""".format(**new_common_args))

add_docstr_all('new_full',
               r"""
new_full(size, fill_value, dtype=None, device=None, requires_grad=False) -> Tensor

Returns a Tensor of size :attr:`size` filled with :attr:`fill_value`.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    {size}
    fill_value (scalar): the number to fill the output tensor with.
    {dtype}
    {device}
    {requires_grad}

Example::

    >>> tensor = torch.ones((2,), dtype=torch.float64)
    >>> tensor.new_full((3, 4), 3.141592)
    tensor([[ 3.1416,  3.1416,  3.1416,  3.1416],
            [ 3.1416,  3.1416,  3.1416,  3.1416],
            [ 3.1416,  3.1416,  3.1416,  3.1416]], dtype=torch.float64)
""".format(**new_common_args))

add_docstr_all('new_empty',
               r"""
new_empty(size, dtype=None, device=None, requires_grad=False) -> Tensor

Returns a Tensor of size :attr:`size` filled with uninitialized data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    {size}
    {dtype}
    {device}
    {requires_grad}

Example::

    >>> tensor = torch.ones(())
    >>> tensor.new_empty((2, 3))
    tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
            [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])
""".format(**new_common_args))

add_docstr_all('new_empty_strided',
               r"""
new_empty_strided(size, stride, dtype=None, device=None, requires_grad=False) -> Tensor

Returns a Tensor of size :attr:`size` and strides :attr:`stride` filled with
uninitialized data. By default, the returned Tensor has the same
:class:`torch.dtype` and :class:`torch.device` as this tensor.

Args:
    {size}
    stride (tuple of ints): the strides of the output tensor.
    {dtype}
    {device}
    {requires_grad}

Example::

    >>> tensor = torch.ones(())
    >>> tensor.new_empty_strided((2, 3), (3, 1))
    tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
            [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])
""".format(**new_common_args))

add_docstr_all('new_ones',
               r"""
new_ones(size, dtype=None, device=None, requires_grad=False) -> Tensor

Returns a Tensor of size :attr:`size` filled with ``1``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.
    {dtype}
    {device}
    {requires_grad}

Example::

    >>> tensor = torch.tensor((), dtype=torch.int32)
    >>> tensor.new_ones((2, 3))
    tensor([[ 1,  1,  1],
            [ 1,  1,  1]], dtype=torch.int32)
""".format(**new_common_args))

add_docstr_all('new_zeros',
               r"""
new_zeros(size, dtype=None, device=None, requires_grad=False) -> Tensor

Returns a Tensor of size :attr:`size` filled with ``0``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.
    {dtype}
    {device}
    {requires_grad}

Example::

    >>> tensor = torch.tensor((), dtype=torch.float64)
    >>> tensor.new_zeros((2, 3))
    tensor([[ 0.,  0.,  0.],
            [ 0.,  0.,  0.]], dtype=torch.float64)
""".format(**new_common_args))

add_docstr_all('abs',
               r"""
abs() -> Tensor

See :func:`torch.abs`
""")

add_docstr_all('abs_',
               r"""
abs_() -> Tensor

In-place version of :meth:`~Tensor.abs`
""")

add_docstr_all('absolute',
               r"""
absolute() -> Tensor

Alias for :func:`abs`
""")

add_docstr_all('absolute_',
               r"""
absolute_() -> Tensor

In-place version of :meth:`~Tensor.absolute`
Alias for :func:`abs_`
""")

add_docstr_all('acos',
               r"""
acos() -> Tensor

See :func:`torch.acos`
""")

add_docstr_all('acos_',
               r"""
acos_() -> Tensor

In-place version of :meth:`~Tensor.acos`
""")

add_docstr_all('arccos', r"""
arccos() -> Tensor

See :func:`torch.arccos`
""")

add_docstr_all('arccos_', r"""
arccos_() -> Tensor

In-place version of :meth:`~Tensor.arccos`
""")

add_docstr_all('acosh',
               r"""
acosh() -> Tensor

See :func:`torch.acosh`
""")

add_docstr_all('acosh_',
               r"""
acosh_() -> Tensor

In-place version of :meth:`~Tensor.acosh`
""")

add_docstr_all('arccosh', r"""
arccosh() -> Tensor

See :func:`torch.arccosh`
""")

add_docstr_all('arccosh_', r"""
arccosh_() -> Tensor

In-place version of :meth:`~Tensor.arccosh`
""")

add_docstr_all('add',
               r"""
add(other, *, alpha=1) -> Tensor

Add a scalar or tensor to :attr:`self` tensor. If both :attr:`alpha`
and :attr:`other` are specified, each element of :attr:`other` is scaled by
:attr:`alpha` before being used.

When :attr:`other` is a tensor, the shape of :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor.

See :func:`torch.add`
""")

add_docstr_all('add_',
               r"""
add_(other, *, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.add`
""")

add_docstr_all('addbmm',
               r"""
addbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addbmm`
""")

add_docstr_all('addbmm_',
               r"""
addbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addbmm`
""")

add_docstr_all('addcdiv',
               r"""
addcdiv(tensor1, tensor2, *, value=1) -> Tensor

See :func:`torch.addcdiv`
""")

add_docstr_all('addcdiv_',
               r"""
addcdiv_(tensor1, tensor2, *, value=1) -> Tensor

In-place version of :meth:`~Tensor.addcdiv`
""")

add_docstr_all('addcmul',
               r"""
addcmul(tensor1, tensor2, *, value=1) -> Tensor

See :func:`torch.addcmul`
""")

add_docstr_all('addcmul_',
               r"""
addcmul_(tensor1, tensor2, *, value=1) -> Tensor

In-place version of :meth:`~Tensor.addcmul`
""")

add_docstr_all('addmm',
               r"""
addmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addmm`
""")

add_docstr_all('addmm_',
               r"""
addmm_(mat1, mat2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addmm`
""")

add_docstr_all('addmv',
               r"""
addmv(mat, vec, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addmv`
""")