# Package: neilisaac/torch 1.8.0 (Python)
# File: torch/_C/_VariableFunctions.pyi

# @generated from torch/_C/_VariableFunctions.pyi.in

from torch import Tensor, Generator, strided, memory_format, contiguous_format
from typing import List, Tuple, Optional, Union, Any, ContextManager, Callable, overload, Iterator, NamedTuple, Sequence, TypeVar
from torch._six import inf

from torch.types import _int, _float, _bool, Number, _dtype, _device, _qscheme, _size, _layout

import builtins

# Named tuple return types used by the structured-return signatures below (the code generator marks these as redundant).
namedtuple_primal_tangent = NamedTuple("namedtuple_primal_tangent", [("primal", Tensor), ("tangent", Tensor)])
namedtuple_values_indices = NamedTuple("namedtuple_values_indices", [("values", Tensor), ("indices", Tensor)])
namedtuple_eigenvalues_eigenvectors = NamedTuple("namedtuple_eigenvalues_eigenvectors", [("eigenvalues", Tensor), ("eigenvectors", Tensor)])
namedtuple_a_tau = NamedTuple("namedtuple_a_tau", [("a", Tensor), ("tau", Tensor)])
namedtuple_solution_QR = NamedTuple("namedtuple_solution_QR", [("solution", Tensor), ("QR", Tensor)])
namedtuple_Q_R = NamedTuple("namedtuple_Q_R", [("Q", Tensor), ("R", Tensor)])
namedtuple_sign_logabsdet = NamedTuple("namedtuple_sign_logabsdet", [("sign", Tensor), ("logabsdet", Tensor)])
namedtuple_solution_LU = NamedTuple("namedtuple_solution_LU", [("solution", Tensor), ("LU", Tensor)])
namedtuple_U_S_V = NamedTuple("namedtuple_U_S_V", [("U", Tensor), ("S", Tensor), ("V", Tensor)])
namedtuple_solution_cloned_coefficient = NamedTuple("namedtuple_solution_cloned_coefficient", [("solution", Tensor), ("cloned_coefficient", Tensor)])
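
# Illustrative note (not part of the generated stub): these NamedTuple aliases let
# structured results support both attribute access and tuple unpacking, e.g. for
# _unpack_dual declared below (assuming the usual top-level re-export):
#
#     res = torch._unpack_dual(dual, level)   # res.primal, res.tangent
#     primal, tangent = res                   # plain tuple unpacking also works
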

@overload
def __and__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __and__(input: Tensor, other: Number) -> Tensor: ...
@overload
def __lshift__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __lshift__(input: Tensor, other: Number) -> Tensor: ...
@overload
def __or__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __or__(input: Tensor, other: Number) -> Tensor: ...
@overload
def __rshift__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __rshift__(input: Tensor, other: Number) -> Tensor: ...
@overload
def __xor__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __xor__(input: Tensor, other: Number) -> Tensor: ...
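
# Note (illustrative): the __and__/__lshift__/__or__/__rshift__/__xor__ entries are
# the functional forms behind the Python bitwise and shift operators, so for
# integer or bool tensors __and__(a, b) matches a & b, __lshift__(a, 1) matches
# a << 1, and so on. Each accepts a Tensor or a plain Number as the right operand.
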
def _adaptive_avg_pool2d(input: Tensor, output_size: Union[_int, _size]) -> Tensor: ...
def _add_batch_dim(input: Tensor, batch_dim: _int, level: _int) -> Tensor: ...
def _add_relu(input: Tensor, other: Tensor, *, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
def _add_relu_(input: Tensor, other: Tensor, *, alpha: Number=1) -> Tensor: ...
def _addmv_impl_(input: Tensor, self2: Tensor, mat: Tensor, vec: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
@overload
def _aminmax(input: Tensor) -> Tuple[Tensor, Tensor]: ...
@overload
def _aminmax(input: Tensor, dim: _int, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
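
# Usage sketch (internal helper, illustrative): _aminmax returns the minimum and
# maximum in one pass, over the whole tensor or along a dimension:
#
#     lo, hi = torch._aminmax(x)       # global min and max
#     lo, hi = torch._aminmax(x, 1)    # per-row min and max along dim 1
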
def _amp_foreach_non_finite_check_and_unscale_(self: Union[Tuple[Tensor, ...], List[Tensor]], found_inf: Tensor, inv_scale: Tensor) -> None: ...
def _amp_update_scale(growth_tracker: Tensor, current_scale: Tensor, found_inf: Tensor, scale_growth_factor: _float, scale_backoff_factor: _float, growth_interval: _int) -> Tensor: ...
def _baddbmm_mkl_(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
def _batch_norm_impl_index(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, _int]: ...
def _bmm(input: Tensor, mat2: Tensor, *, deterministic: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
def _cast_Byte(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Char(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Double(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Float(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Half(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Int(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Long(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Short(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
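
# Note (illustrative): the _cast_* helpers are legacy scalar-type casts kept for
# backward compatibility; in user code the same effect is achieved with Tensor.to,
# e.g. x.to(torch.float32) rather than torch._cast_Float(x).
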
def _cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
def _choose_qparams_per_tensor(input: Tensor, reduce_range: _bool=False) -> Tuple[_float, _int]: ...
def _compute_linear_combination(input: Tensor, coefficients: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def _conj(input: Tensor) -> Tensor: ...
@overload
def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, transposed: _bool, output_padding: _size, groups: _int, benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool) -> Tensor: ...
@overload
def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, transposed: _bool, output_padding: _size, groups: _int, benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool, allow_tf32: _bool) -> Tensor: ...
def _convolution_nogroup(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, transposed: _bool, output_padding: _size) -> Tensor: ...
def _copy_from(input: Tensor, dst: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int=0, zero_infinity: _bool=False) -> Tuple[Tensor, Tensor]: ...
def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ...
def _cudnn_init_dropout_state(dropout: _float, train: _bool, dropout_seed: _int, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=strided, device: Union[_device, str, None]=None, pin_memory: _bool=False, requires_grad: _bool=False) -> Tensor: ...
def _cudnn_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, weight_buf: Optional[Tensor], hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: _int, proj_size: _int, num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: _size, dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
def _cudnn_rnn_flatten_weight(weight_arr: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, input_size: _int, mode: _int, hidden_size: _int, proj_size: _int, num_layers: _int, batch_first: _bool, bidirectional: _bool) -> Tensor: ...
def _cufft_clear_plan_cache(device_index: _int) -> None: ...
def _cufft_get_plan_cache_max_size(device_index: _int) -> _int: ...
def _cufft_get_plan_cache_size(device_index: _int) -> _int: ...
def _cufft_set_plan_cache_max_size(device_index: _int, max_size: _int) -> None: ...
def _cummax_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
def _cummin_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
def _debug_has_internal_overlap(input: Tensor) -> _int: ...
def _dim_arange(like: Tensor, dim: _int) -> Tensor: ...
def _dirichlet_grad(x: Tensor, alpha: Tensor, total: Tensor) -> Tensor: ...
def _embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool=False, mode: _int=0, sparse: _bool=False, per_sample_weights: Optional[Tensor]=None, include_last_offset: _bool=False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def _embedding_bag_forward_only(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool=False, mode: _int=0, sparse: _bool=False, per_sample_weights: Optional[Tensor]=None, include_last_offset: _bool=False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
@overload
def _empty_affine_quantized(size: _size, *, scale: _float=1, zero_point: _int=0, memory_format: Optional[memory_format]=contiguous_format, dtype: Optional[_dtype]=None, layout: Optional[_layout]=strided, device: Union[_device, str, None]=None, pin_memory: _bool=False, requires_grad: _bool=False) -> Tensor: ...
@overload
def _empty_affine_quantized(*size: _int, scale: _float=1, zero_point: _int=0, memory_format: Optional[memory_format]=contiguous_format, dtype: Optional[_dtype]=None, layout: Optional[_layout]=strided, device: Union[_device, str, None]=None, pin_memory: _bool=False, requires_grad: _bool=False) -> Tensor: ...
@overload
def _empty_per_channel_affine_quantized(size: _size, *, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format]=contiguous_format, dtype: Optional[_dtype]=None, layout: Optional[_layout]=strided, device: Union[_device, str, None]=None, pin_memory: _bool=False, requires_grad: _bool=False) -> Tensor: ...
@overload
def _empty_per_channel_affine_quantized(*size: _int, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format]=contiguous_format, dtype: Optional[_dtype]=None, layout: Optional[_layout]=strided, device: Union[_device, str, None]=None, pin_memory: _bool=False, requires_grad: _bool=False) -> Tensor: ...
def _euclidean_dist(x1: Tensor, x2: Tensor) -> Tensor: ...
def _fake_quantize_learnable_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int, grad_factor: _float=1.0) -> Tensor: ...
def _fake_quantize_learnable_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int, grad_factor: _float=1.0) -> Tensor: ...
def _fft_c2c(input: Tensor, dim: _size, normalization: _int, forward: _bool, *, out: Optional[Tensor]=None) -> Tensor: ...
def _fft_c2r(input: Tensor, dim: _size, normalization: _int, last_dim_size: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
def _fft_r2c(input: Tensor, dim: _size, normalization: _int, onesided: _bool, *, out: Optional[Tensor]=None) -> Tensor: ...
def _foreach_abs(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_abs_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_acos(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_acos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_add(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_add(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_add(tensors1: Union[Tuple[Tensor, ...], List[Tensor]], tensors2: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> None: ...
@overload
def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> None: ...
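
# Usage sketch (illustrative): the _foreach_* helpers apply one op across a whole
# list of tensors with fused kernels; optimizers use them to batch parameter
# updates. A minimal SGD-style step, assuming params and grads are matching lists:
#
#     torch._foreach_add_(params, grads, alpha=-lr)   # params[i] += -lr * grads[i]
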
@overload
def _foreach_addcdiv(input: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_addcdiv(input: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> None: ...
@overload
def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> None: ...
@overload
def _foreach_addcmul(input: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_addcmul(input: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> None: ...
@overload
def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> None: ...
def _foreach_asin(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_asin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_atan(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_atan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_ceil(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_ceil_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_cos(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_cos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_cosh(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_cosh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_div(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_div(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_div(tensors1: Union[Tuple[Tensor, ...], List[Tensor]], tensors2: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> None: ...
@overload
def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_erf(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_erf_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_erfc(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_erfc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_exp(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_exp_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_expm1(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_expm1_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_floor(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_floor_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_frac(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_frac_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_lgamma(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_lgamma_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_log(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_log10(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_log10_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_log1p(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_log1p_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_log2(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_log2_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_log_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_maximum(tensors1: Union[Tuple[Tensor, ...], List[Tensor]], tensors2: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_minimum(tensors1: Union[Tuple[Tensor, ...], List[Tensor]], tensors2: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_mul(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_mul(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_mul(tensors1: Union[Tuple[Tensor, ...], List[Tensor]], tensors2: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> None: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_neg(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_neg_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_reciprocal(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_reciprocal_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_round(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_round_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_sigmoid(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_sigmoid_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_sin(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_sin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_sinh(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_sinh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_sqrt(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_sqrt_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_sub(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_sub(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_sub(tensors1: Union[Tuple[Tensor, ...], List[Tensor]], tensors2: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[float]) -> None: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> None: ...
def _foreach_tan(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_tan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_tanh(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_tanh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_trunc(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Union[Tuple[Tensor, ...], List[Tensor]]: ...
def _foreach_trunc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_zero_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
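
# Note (illustrative): throughout the _foreach_* family, a trailing underscore
# marks the in-place variant, which mutates its input list and returns None, e.g.
# torch._foreach_zero_(grads) zeroes every tensor in grads in place.
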
def _fused_dropout(input: Tensor, p: _float, generator: Optional[Generator]=None) -> Tuple[Tensor, Tensor]: ...
def _grid_sampler_2d_cpu_fallback(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def _has_compatible_shallow_copy_type(input: Tensor, from_: Tensor) -> _bool: ...
def _index_copy_(input: Tensor, dim: _int, index: Tensor, source: Tensor) -> Tensor: ...
def _index_put_impl_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool=False, unsafe: _bool=False) -> Tensor: ...
def _linalg_inv_out_helper_(input: Tensor, infos_lu: Tensor, infos_getri: Tensor) -> Tensor: ...
def _linalg_qr_helper(input: Tensor, mode: str) -> Tuple[Tensor, Tensor]: ...
def _linalg_solve_out_helper_(input: Tensor, other: Tensor, infos: Tensor) -> Tensor: ...
def _log_softmax(input: Tensor, dim: _int, half_to_float: _bool) -> Tensor: ...
def _log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
def _logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
def _lu_solve_helper(input: Tensor, LU_data: Tensor, LU_pivots: Tensor) -> Tensor: ...
def _lu_with_info(input: Tensor, pivot: _bool=True, check_errors: _bool=True) -> Tuple[Tensor, Tensor, Tensor]: ...
def _make_dual(primal: Tensor, tangent: Tensor, level: _int) -> Tensor: ...
def _make_per_channel_quantized_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int) -> Tensor: ...
def _make_per_tensor_quantized_tensor(input: Tensor, scale: _float, zero_point: _int) -> Tensor: ...
def _masked_scale(input: Tensor, mask: Tensor, scale: _float) -> Tensor: ...
def _mkldnn_reshape(input: Tensor, shape: _size) -> Tensor: ...
def _mkldnn_transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
def _mkldnn_transpose_(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
def _mode(input: Tensor, dim: _int=-1, keepdim: _bool=False, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> Tuple[Tensor, Tensor]: ...
def _nnpack_available() -> _bool: ...
def _nnpack_spatial_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Union[_int, _size], stride: Union[_int, _size]=1) -> Tensor: ...
def _pack_padded_sequence(input: Tensor, lengths: Tensor, batch_first: _bool) -> Tuple[Tensor, Tensor]: ...
def _pad_packed_sequence(data: Tensor, batch_sizes: Tensor, batch_first: _bool, padding_value: Number, total_length: _int) -> Tuple[Tensor, Tensor]: ...
def _remove_batch_dim(input: Tensor, level: _int, batch_size: _int, out_dim: _int) -> Tensor: ...
def _reshape_from_tensor(input: Tensor, shape: Tensor) -> Tensor: ...
def _rowwise_prune(weight: Tensor, mask: Tensor, compressed_indices_dtype: _dtype) -> Tuple[Tensor, Tensor]: ...
def _s_where(condition: Tensor, input: Tensor, other: Tensor) -> Tensor: ...
def _sample_dirichlet(input: Tensor, generator: Optional[Generator]=None) -> Tensor: ...
def _saturate_weight_to_fp16(weight: Tensor) -> Tensor: ...
def _shape_as_tensor(input: Tensor) -> Tensor: ...
def _sobol_engine_draw(quasi: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int, dtype: Optional[_dtype]) -> Tuple[Tensor, Tensor]: ...
def _sobol_engine_ff_(input: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int) -> Tensor: ...
def _sobol_engine_initialize_state_(input: Tensor, dimension: _int) -> Tensor: ...
def _sobol_engine_scramble_(input: Tensor, ltm: Tensor, dimension: _int) -> Tensor: ...
def _softmax(input: Tensor, dim: _int, half_to_float: _bool) -> Tensor: ...
def _softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
def _sparse_addmm(input: Tensor, sparse: Tensor, dense: Tensor, *, beta: Number=1, alpha: Number=1) -> Tensor: ...
def _sparse_coo_tensor_unsafe(indices: Tensor, values: Tensor, size: List[int], dtype: Optional[_dtype]=None, device: Optional[_device]=None, requires_grad: _bool=False) -> Tensor: ...
@overload
def _sparse_log_softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ...
@overload
def _sparse_log_softmax(input: Tensor, dim: _int, dtype: Optional[_dtype]=None) -> Tensor: ...
@overload
def _sparse_log_softmax(input: Tensor, dim: _int, half_to_float: _bool) -> Tensor: ...
def _sparse_log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
def _sparse_matrix_mask_helper(t: Tensor, mask_indices: Tensor) -> Tensor: ...
def _sparse_mm(sparse: Tensor, dense: Tensor) -> Tensor: ...
@overload
def _sparse_softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype]=None) -> Tensor: ...
@overload
def _sparse_softmax(input: Tensor, dim: _int, dtype: Optional[_dtype]=None) -> Tensor: ...
@overload
def _sparse_softmax(input: Tensor, dim: _int, half_to_float: _bool) -> Tensor: ...
def _sparse_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ...
def _sparse_sparse_matmul(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def _sparse_sum(input: Tensor) -> Tensor: ...
@overload
def _sparse_sum(input: Tensor, *, dtype: _dtype) -> Tensor: ...
@overload
def _sparse_sum(input: Tensor, dim: Union[_int, _size]) -> Tensor: ...
@overload
def _sparse_sum(input: Tensor, dim: Union[_int, _size], *, dtype: _dtype) -> Tensor: ...
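
# Usage sketch (internal helper, illustrative): _sparse_sum reduces a sparse
# tensor over all elements, or along the given sparse dimensions:
#
#     total = torch._sparse_sum(sp)         # sum of all values (0-dim result)
#     rows = torch._sparse_sum(sp, dim=0)   # reduce along sparse dim 0
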
def _stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int=0, *, out: Optional[Tensor]=None) -> Tensor: ...
def _standard_gamma(input: Tensor, generator: Optional[Generator]=None) -> Tensor: ...
def _standard_gamma_grad(input: Tensor, output: Tensor) -> Tensor: ...
def _std(input: Tensor, unbiased: _bool=True) -> Tensor: ...
def _syevd_helper(input: Tensor, compute_eigenvectors: _bool, uplo: str) -> Tuple[Tensor, Tensor]: ...
def _test_serialization_subcmul(input: Tensor, other: Tensor, alpha: Number=1) -> Tensor: ...
def _trilinear(i1: Tensor, i2: Tensor, i3: Tensor, expand1: _size, expand2: _size, expand3: _size, sumdim: _size, unroll_dim: _int=1) -> Tensor: ...
def _unique(input: Tensor, sorted: _bool=True, return_inverse: _bool=False) -> Tuple[Tensor, Tensor]: ...
def _unique2(input: Tensor, sorted: _bool=True, return_inverse: _bool=False, return_counts: _bool=False) -> Tuple[Tensor, Tensor, Tensor]: ...
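
# Note (illustrative): _unique and _unique2 are internal helpers behind the public
# torch.unique; _unique2 additionally supports return_counts:
#
#     values, inverse, counts = torch._unique2(x, sorted=True, return_inverse=True, return_counts=True)
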
def _unpack_dual(dual: Tensor, level: _int) -> namedtuple_primal_tangent: ...
def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int) -> _bool: ...
def _use_cudnn_rnn_flatten_weight() -> _bool: ...
def _validate_sparse_coo_tensor_args(indices: Tensor, values: Tensor, size: _size) -> None: ...
def _var(input: Tensor, unbiased: _bool=True) -> Tensor: ...
def _weight_norm(v: Tensor, g: Tensor, dim: _int=0) -> Tensor: ...
def _weight_norm_cuda_interface(v: Tensor, g: Tensor, dim: _int=0) -> Tuple[Tensor, Tensor]: ...
def abs(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def abs_(input: Tensor) -> Tensor: ...
def absolute(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def acos(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def acos_(input: Tensor) -> Tensor: ...
def acosh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def acosh_(input: Tensor) -> Tensor: ...
def adaptive_avg_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tensor: ...
def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
@overload
def add(input: Union[Tensor, Number], other: Union[Tensor, Number], *, alpha: Optional[Number]=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def add(self: Tensor, alpha: Number, other: Tensor) -> Tensor: ...
@overload
def add(self: Tensor, alpha: Number, other: Tensor, *, out: Tensor) -> Tensor: ...
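
# Usage sketch (illustrative): the keyword-alpha overload is the current API; the
# (self, alpha, other) forms are deprecated positional-alpha variants:
#
#     torch.add(x, y, alpha=2)   # x + 2 * y (preferred)
#     torch.add(x, 2, y)         # legacy spelling of the same computation
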
@overload
def addbmm(beta: Number, self: Tensor, alpha: Number, batch1: Tensor, batch2: Tensor) -> Tensor: ...
@overload
def addbmm(beta: Number, self: Tensor, alpha: Number, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addbmm(beta: Number, self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor: ...
@overload
def addbmm(beta: Number, self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Number=1, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
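
# Usage sketch (illustrative): addbmm performs a batch matrix-multiply, reduces
# over the batch, and adds the result to input; the positional beta/alpha
# overloads above are deprecated:
#
#     # out = beta * input + alpha * (batch1 @ batch2).sum(dim=0)
#     torch.addbmm(m, batch1, batch2, beta=1, alpha=1)
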
@overload
def addcdiv(self: Tensor, value: Number, tensor1: Tensor, tensor2: Tensor) -> Tensor: ...
@overload
def addcdiv(self: Tensor, value: Number, tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addcdiv(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
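
# Usage sketch (illustrative): addcdiv computes input + value * tensor1 / tensor2
# elementwise; the (self, value, tensor1, tensor2) overloads are deprecated:
#
#     torch.addcdiv(x, t1, t2, value=0.5)   # x + 0.5 * t1 / t2
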
@overload
def addcmul(self: Tensor, value: Number, tensor1: Tensor, tensor2: Tensor) -> Tensor: ...
@overload
def addcmul(self: Tensor, value: Number, tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: ...
@overload
def addcmul(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
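
# Usage sketch (illustrative): addcmul is the multiplicative counterpart,
# computing input + value * tensor1 * tensor2 elementwise:
#
#     torch.addcmul(x, t1, t2, value=0.5)   # x + 0.5 * t1 * t2
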