# See README.md in this directory for more guidance

# *********NB: _cast_* operators are DEPRECATED and will be removed
# eventually. These were previously used before TorchScript IR supported
# representing ScalarType's. They are now superseded by usage of
# `aten::to()`. The ops remain here for backward compatibility purposes.
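#
# For example, the deprecated cast ops correspond roughly to the following
# `Tensor.to` calls in Python (illustrative sketch, not exhaustive):
#
#   x = torch.randn(4)
#   y_old = torch._cast_Byte(x)      # deprecated
#   y_new = x.to(torch.uint8)        # preferred replacement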

# DEPRECATED. DO NOT USE
- func: _cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# DEPRECATED. DO NOT USE
- func: _cast_Char(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# DEPRECATED. DO NOT USE
- func: _cast_Double(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# DEPRECATED. DO NOT USE
- func: _cast_Float(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# DEPRECATED. DO NOT USE
- func: _cast_Int(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# DEPRECATED. DO NOT USE
- func: _cast_Long(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# DEPRECATED. DO NOT USE
- func: _cast_Short(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# DEPRECATED. DO NOT USE
- func: _cast_Half(Tensor self, bool non_blocking=False) -> Tensor
  variants: function

# Computes the gradient of current tensor w.r.t. graph leaves.
- func: _backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
  manual_cpp_binding: True
  variants: method
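#
# In Python, the same functionality is available as `Tensor.backward`, e.g.:
#
#   x = torch.randn(3, requires_grad=True)
#   y = (x * x).sum()
#   y.backward()      # fills x.grad with dy/dx == 2 * x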

# DEPRECATED. Sets the tensor data held by this `Variable` to be the same as
# `new_data`.  It requires that `new_data` and `Variable` have compatible tensor
# type, by checking `_has_compatible_shallow_copy_type(this, new_data)`.
#
# This function is deprecated because it doesn't really make sense in a world
# where Variables *are* Tensors (as opposed to them containing tensors, which
# is what the previous interpretation was.)
- func: set_data(Tensor(a!) self, Tensor new_data) -> ()
  manual_cpp_binding: True
  variants: method
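#
# From Python, the legacy `a.data = b` assignment roughly corresponds to
# set_data, e.g.:
#
#   a = torch.zeros(3, requires_grad=True)
#   a.data = torch.ones(3)   # swaps the underlying data in place (legacy pattern)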

- func: data(Tensor self) -> Tensor
  manual_cpp_binding: True
  variants: method

# True if this `Variable` is a leaf and thus does not have a `grad_fn`.
- func: is_leaf(Tensor self) -> bool
  manual_cpp_binding: True
  variants: method
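#
# For example:
#
#   a = torch.rand(2, requires_grad=True)   # created by the user -> leaf
#   b = a * 2                               # produced by an op   -> not a leaf
#   assert a.is_leaf and not b.is_leaf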

# Returns the output index of this variable from the forward operation that
# produced it.  Conversely, it returns the input index of the gradient `Node` to
# which this `Variable` is connected (because in the gradient computation,
# inputs and outputs switch meaning).  For example:
#
#   y0, y1, y2 = f(x)
#   assert y0.output_nr == 0
#   assert y1.output_nr == 1
#   assert y2.output_nr == 2
#
- func: output_nr(Tensor self) -> int
  manual_cpp_binding: True
  variants: method

- func: _version(Tensor self) -> int
  manual_cpp_binding: True
  variants: method

- func: requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
  manual_cpp_binding: True
  variants: method

# Enables .grad attribute for non-leaf Tensors.
- func: retain_grad(Tensor(a!) self) -> ()
  manual_cpp_binding: True
  variants: method
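#
# For example, retain_grad keeps .grad on a non-leaf tensor:
#
#   x = torch.randn(3, requires_grad=True)
#   y = x * 2                # non-leaf: .grad is not populated by default
#   y.retain_grad()
#   y.sum().backward()
#   assert y.grad is not None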

- func: retains_grad(Tensor self) -> bool
  manual_cpp_binding: True
  variants: method

- func: _fw_primal(Tensor(a) self, int level) -> Tensor(a)
  variants: method
  dispatch:
    CompositeExplicitAutograd: _fw_primal

- func: _make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
  variants: function
  dispatch:
    CompositeExplicitAutograd: _make_dual

- func: _unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
  variants: function

# NOTE: [_new_zeros_with_same_feature_meta]
# This function creates a new tensor with the layout and TensorOptions
# of `other` but also takes into account the batch dimensions of `self`
#
# This function has a couple extra constraints because it is also used for `jvp`
# in functorch.
# - It is used for forward AD because of the restriction
#   that the primal and tangent must have the same layout
# - We cannot assume that `self` and `other` have the same sizes or even dim
#   because in the inplace over view case, `other` is the base tensor, and
#   `self` is the forward grad with respect to the view, which can have an
#   entirely different shape
# - takes the number of batch dims for `self` because we also handle
#   some batching logic. We handle that here instead of a batching rule because
#   we'd like to avoid calling as_strided in the batching rule (so as to enable
#   nested vmap in functorch).
# - needs to be CompositeExplicitAutograd for jvp support in functorch.
#   functorch currently relies on TensorWrapper, which does not have storage;
#   CompositeExplicitAutograd makes sure the TensorWrapper is unwrapped.
# - this function may eventually take on another int argument to store
#   the number of batch dims for `other` once we support that use case
- func: _new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
  variants: function
  dispatch:
    CompositeExplicitAutograd: _new_zeros_with_same_feature_meta
  autogen: _new_zeros_with_same_feature_meta.out
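#
# Rough illustration of _new_zeros_with_same_feature_meta in the default
# (self_num_batch_dims=0) case; this is an internal helper and its exact
# behavior is subject to change:
#
#   primal  = torch.randn(2, 3)
#   tangent = torch.randn(2, 3)
#   z = torch._new_zeros_with_same_feature_meta(tangent, primal)
#   # z is filled with zeros and mirrors primal's sizes/strides/storage numel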

# This function compares the storage numel of self with that of other, where
# storage numel is computed as: `other.storage().nbytes() / other.itemsize()`.
# We create this function for composite compliance purposes. The batching rule
# always returns true because vmapped as_strided does not support accessing
# storage locations not indexable by the input tensor.
# See the note above for more information.
- func: _has_same_storage_numel(Tensor self, Tensor other) -> bool
  variants: function
  dispatch:
    CompositeExplicitAutograd: _has_same_storage_numel
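#
# For example, the "storage numel" above can exceed Tensor.numel() for a view:
#
#   base = torch.randn(10)
#   view = base[:3]
#   # view.numel() == 3, but view.storage().nbytes() // view.element_size() == 10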

- func: rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
  variants: method
  tags: inplace_view

- func: rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
  variants: method

- func: align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)
  variants: method

- func: align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
  variants: method

- func: align_as(Tensor self, Tensor other) -> Tensor
  variants: method

- func: align_tensors(Tensor[] tensors) -> Tensor[]

# Not assert because it's a keyword; not Assert because FX already
# took that syntax
# TODO: need to specify this is side-effectful somehow
- func: _assert_async(Tensor self) -> ()
  dispatch:
    CPU: _assert_async_cpu
    CUDA: _assert_async_cuda


- func: _assert_tensor_metadata(Tensor a, int[]? size=None, int[]? stride=None, ScalarType? dtype=None) -> ()

- func: refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
  variants: method

- func: _use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool
  device_check: NoCheck  # Tensor arguments allowed to be on different devices, see also _cudnn_ctc_loss
  dispatch:
    CUDA: _use_cudnn_ctc_loss

- func: _use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool
  device_check: NoCheck  # Tensor arguments allowed to be on different devices, see also _cudnn_ctc_loss
  dispatch:
    CUDA: _use_cudnn_ctc_loss_tensor

- func: _cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
  device_check: NoCheck  # log_probs is expected to be on CUDA while targets is expected to be on CPU
  dispatch:
    CUDA: _cudnn_ctc_loss
  autogen: _cudnn_ctc_loss.out

- func: _cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
  device_check: NoCheck  # log_probs is expected to be on CUDA while targets is expected to be on CPU
  dispatch:
    CUDA: _cudnn_ctc_loss_tensor

- func: _use_cudnn_rnn_flatten_weight() -> bool

- func: _cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor
  dispatch:
    CUDA: _cudnn_rnn_flatten_weight
  autogen: _cudnn_rnn_flatten_weight.out

- func: _cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
  # rnn_tanh may or may not redispatch to _cudnn_rnn based on algorithm and build. Thus it might hit dispatch or kernel device check.
  # Disable dispatch time device check for consistent behavior.
  device_check: NoCheck
  dispatch:
    CUDA: _cudnn_rnn
  autogen: _cudnn_rnn.out

- func: _cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
  dispatch:
    CUDA: _cudnn_rnn_backward
  autogen: _cudnn_rnn_backward.out

- func: _cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
  dispatch:
    CUDA: _cudnn_init_dropout_state
  autogen: _cudnn_init_dropout_state.out

- func: _debug_has_internal_overlap(Tensor self) -> int
  variants: function

- func: _fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
  variants: function
  dispatch:
    CUDA: fused_dropout_cuda
  tags: nondeterministic_seeded
  autogen: _fused_dropout.out

- func: _masked_scale(Tensor self, Tensor mask, float scale) -> Tensor
  variants: function
  dispatch:
    CUDA: masked_scale_cuda
  autogen: _masked_scale.out

- func: native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)
  variants: function
  dispatch:
    CPU: native_dropout_cpu
    CUDA: native_dropout_cuda
    NestedTensorCPU, NestedTensorCUDA: native_dropout_nested
  tags: [nondeterministic_seeded, core]
  autogen: native_dropout.out

- func: native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor
  dispatch:
    CPU, NestedTensorCPU, NestedTensorCUDA: native_dropout_backward
    CUDA: native_dropout_backward_cuda
  autogen: native_dropout_backward.out
  tags: pointwise

- func: _sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)

- func: _sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)

- func: _sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)

- func: _sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)

- func: _reshape_from_tensor(Tensor self, Tensor shape) -> Tensor

- func: _shape_as_tensor(Tensor self) -> Tensor

- func: dropout(Tensor input, float p, bool train) -> Tensor
  tags: nondeterministic_seeded

- func: dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
  tags: nondeterministic_seeded

- func: feature_dropout(Tensor input, float p, bool train) -> Tensor
  tags: nondeterministic_seeded

- func: feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
  tags: nondeterministic_seeded

- func: alpha_dropout(Tensor input, float p, bool train) -> Tensor
  tags: nondeterministic_seeded

- func: alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
  tags: nondeterministic_seeded

- func: feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor
  tags: nondeterministic_seeded

- func: feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
  tags: nondeterministic_seeded

- func: abs(Tensor self) -> Tensor
  device_check: NoCheck   # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: abs
    SparseCPU, SparseCUDA: abs_sparse
    SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr
  tags: [core, pointwise]

- func: abs_(Tensor(a!) self) -> Tensor(a!)
  device_check: NoCheck   # TensorIterator
  variants: function, method
  dispatch:
    CompositeExplicitAutograd: abs_
    SparseCPU, SparseCUDA: abs_sparse_
    SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr_

- func: abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
  device_check: NoCheck   # TensorIterator
  dispatch:
    CPU, CUDA: abs_out
    MPS: abs_out_mps
    SparseCPU, SparseCUDA: abs_sparse_out
    SparseCsrCPU, SparseCsrCUDA: abs_sparse_csr_out
  tags: pointwise

# Note [Adding an alias]
# To add an alias do the following:
#
# 1) Copy the original function's native_functions.yaml entry, but replace the
#      original function's name with the alias's name and delete any dispatch
#      keys for the alias. Specifying a dispatch key will prevent
#      autograd from recording the operations the alias performs, which
#      will stop it from "inheriting" the original operation's autograd behavior.
# 2) Implement the corresponding functions and have them redispatch to the
#      original function.
# 3) Add docstrings to the new function that reference the original function,
#      and document the method as usual (if it exists.)
#    (See torch/_torch_docs.py and docs/source/torch.rst if adding a function,
#     torch/_tensor_docs.py and docs/source/tensors.rst if adding a method,
#     or module-specific doc bindings (like torch/linalg/__init__.py) if
#     adding an alias in a namespace.)
# 4) Update torch/overrides.py consistent with the original function.
# 5) Update the alias_map in torch/csrc/jit/passes/normalize_ops.cpp.
# 6) Add an `aliases` argument to the existing OpInfo/UnaryUfuncInfo entry, or create
#      a new OpInfo/UnaryUfuncInfo entry, in the op_db list in
#      torch/testing/_internal/common_methods_invocations.py.
#
# See torch.absolute, an alias for torch.abs, as an example.
# Absolute, alias for abs

- func: absolute(Tensor self) -> Tensor
  device_check: NoCheck   # TensorIterator
  variants: function, method