# @generated from torch/_C/_VariableFunctions.pyi.in
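#
# Type stubs for the native operator bindings exposed as
# torch._C._VariableFunctions, the module backing many torch.* functions.
# Naming conventions (noted here for readers; the file itself is generated):
# a leading underscore marks a private/internal binding, and a trailing
# underscore marks an in-place variant that mutates its first argument.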

from torch import Tensor, Generator, strided, memory_format, contiguous_format, inf
from typing import List, Tuple, Optional, Union, Any, ContextManager, Callable, overload, Iterator, NamedTuple, Sequence, Literal, TypeVar

from torch.types import _int, _float, _bool, Number, _dtype, _device, _qscheme, _size, _layout, SymInt, Device
import torch

import builtins

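# Bitwise and shift operators as free functions; each overload takes either a
# Tensor (elementwise) or a Number (scalar) as `other`. For integer or boolean
# tensors, torch.__and__(a, b) computes the same result as a & b.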
@overload
def __and__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __and__(input: Tensor, other: Number) -> Tensor: ...
@overload
def __lshift__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __lshift__(input: Tensor, other: Number) -> Tensor: ...
@overload
def __or__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __or__(input: Tensor, other: Number) -> Tensor: ...
@overload
def __rshift__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __rshift__(input: Tensor, other: Number) -> Tensor: ...
@overload
def __xor__(input: Tensor, other: Tensor) -> Tensor: ...
@overload
def __xor__(input: Tensor, other: Number) -> Tensor: ...
def _adaptive_avg_pool2d(input: Tensor, output_size: Sequence[Union[_int, SymInt]]) -> Tensor: ...
def _adaptive_avg_pool3d(input: Tensor, output_size: Sequence[Union[_int, SymInt]]) -> Tensor: ...
def _add_batch_dim(input: Tensor, batch_dim: _int, level: _int) -> Tensor: ...
@overload
def _add_relu(input: Tensor, other: Tensor, *, alpha: Number=1, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def _add_relu(input: Tensor, other: Number, alpha: Number=1) -> Tensor: ...
@overload
def _add_relu_(input: Tensor, other: Tensor, *, alpha: Number=1) -> Tensor: ...
@overload
def _add_relu_(input: Tensor, other: Number, alpha: Number=1) -> Tensor: ...
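# _add_relu fuses an elementwise add with a ReLU so the intermediate sum need
# not be materialized separately; _add_relu_ is the in-place variant.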
def _addmm_activation(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number=1, alpha: Number=1, use_gelu: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def _aminmax(input: Tensor) -> Tuple[Tensor, Tensor]: ...
@overload
def _aminmax(input: Tensor, dim: _int, keepdim: _bool=False) -> Tuple[Tensor, Tensor]: ...
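# Mixed-precision (AMP) gradient-scaling internals: the first kernel unscales
# a list of grads in place by inv_scale and flags non-finite values in
# found_inf; the second updates the loss scale from that flag. These back
# torch.cuda.amp.GradScaler.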
def _amp_foreach_non_finite_check_and_unscale_(self: Union[Tuple[Tensor, ...], List[Tensor]], found_inf: Tensor, inv_scale: Tensor) -> None: ...
def _amp_update_scale_(input: Tensor, growth_tracker: Tensor, found_inf: Tensor, scale_growth_factor: _float, scale_backoff_factor: _float, growth_interval: _int) -> Tensor: ...
def _assert_async(input: Tensor) -> None: ...
def _assert_tensor_metadata(a: Tensor, size: Optional[_size]=None, stride: Optional[_size]=None, dtype: Optional[_dtype]=None) -> None: ...
def _batch_norm_impl_index(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, _int]: ...
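# Legacy _cast_* helpers convert to the named dtype; with non_blocking=True
# the copy may overlap with host computation when layouts and devices allow.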
def _cast_Byte(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Char(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Double(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Float(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Half(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Int(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Long(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _cast_Short(input: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _choose_qparams_per_tensor(input: Tensor, reduce_range: _bool=False) -> Tuple[_float, _int]: ...
def _chunk_grad_outputs_efficient_attention(query: Tensor, key: Tensor, value: Tensor, is_causal: _bool=False) -> _bool: ...
def _coalesce(input: Tensor) -> Tensor: ...
def _compute_linear_combination(input: Tensor, coefficients: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def _conj(input: Tensor) -> Tensor: ...
def _conj_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def _conj_physical(input: Tensor) -> Tensor: ...
def _convert_indices_from_coo_to_csr(input: Tensor, size: _int, *, out_int32: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
def _convert_indices_from_csr_to_coo(crow_indices: Tensor, col_indices: Tensor, *, out_int32: _bool=False, transpose: _bool=False, out: Optional[Tensor]=None) -> Tensor: ...
@overload
def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: _size, dilation: _size, transposed: _bool, output_padding: _size, groups: _int, benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool) -> Tensor: ...
@overload
def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: Sequence[Union[_int, SymInt]], dilation: _size, transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: _int, benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool, allow_tf32: _bool) -> Tensor: ...
def _convolution_mode(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: _size, padding: str, dilation: _size, groups: _int) -> Tensor: ...
def _copy_from(input: Tensor, dst: Tensor, non_blocking: _bool=False) -> Tensor: ...
def _copy_from_and_resize(input: Tensor, dst: Tensor) -> Tensor: ...
@overload
def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int=0, zero_infinity: _bool=False) -> Tuple[Tensor, Tensor]: ...
@overload
def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int=0, zero_infinity: _bool=False) -> Tuple[Tensor, Tensor]: ...
@overload
def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ...
@overload
def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ...
def _cudnn_init_dropout_state(dropout: _float, train: _bool, dropout_seed: _int, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
def _cudnn_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, weight_buf: Optional[Tensor], hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: Sequence[Union[_int, SymInt]], dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ...
def _cudnn_rnn_flatten_weight(weight_arr: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, input_size: Union[_int, SymInt], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, bidirectional: _bool) -> Tensor: ...
def _cufft_clear_plan_cache(device_index: _int) -> None: ...
def _cufft_get_plan_cache_max_size(device_index: _int) -> _int: ...
def _cufft_get_plan_cache_size(device_index: _int) -> _int: ...
def _cufft_set_plan_cache_max_size(device_index: _int, max_size: _int) -> None: ...
def _cummax_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
def _cummin_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ...
def _debug_has_internal_overlap(input: Tensor) -> _int: ...
def _dim_arange(like: Tensor, dim: _int) -> Tensor: ...
def _dirichlet_grad(x: Tensor, alpha: Tensor, total: Tensor) -> Tensor: ...
def _disable_functionalization() -> None: ...
@overload
def _efficientzerotensor(size: _size, *, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def _efficientzerotensor(*size: _int, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
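# For _embedding_bag, `mode` selects the reduction: 0 = sum, 1 = mean, 2 = max;
# a negative padding_idx (the default, -1) means no index is treated as padding.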
def _embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool=False, mode: _int=0, sparse: _bool=False, per_sample_weights: Optional[Tensor]=None, include_last_offset: _bool=False, padding_idx: _int=-1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
def _embedding_bag_forward_only(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool=False, mode: _int=0, sparse: _bool=False, per_sample_weights: Optional[Tensor]=None, include_last_offset: _bool=False, padding_idx: _int=-1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
@overload
def _empty_affine_quantized(size: _size, *, scale: _float=1, zero_point: _int=0, memory_format: Optional[memory_format]=contiguous_format, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def _empty_affine_quantized(*size: _int, scale: _float=1, zero_point: _int=0, memory_format: Optional[memory_format]=contiguous_format, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def _empty_per_channel_affine_quantized(size: _size, *, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format]=contiguous_format, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
@overload
def _empty_per_channel_affine_quantized(*size: _int, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format]=contiguous_format, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor: ...
def _enable_functionalization(*, reapply_views: _bool=False) -> None: ...
def _euclidean_dist(x1: Tensor, x2: Tensor) -> Tensor: ...
def _fake_quantize_learnable_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int, grad_factor: _float=1.0) -> Tensor: ...
def _fake_quantize_learnable_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int, grad_factor: _float=1.0) -> Tensor: ...
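# Several internal ops below return structured results as named tuples from
# torch.return_types; fields can be read by name or by position.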
def _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(input: Tensor, scale: Tensor, zero_point: Tensor, fake_quant_enabled: Tensor, quant_min: _int, quant_max: _int) -> torch.return_types._fake_quantize_per_tensor_affine_cachemask_tensor_qparams: ...
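# Low-level FFT kernels behind torch.fft: complex-to-complex, complex-to-real,
# and real-to-complex. `normalization` is an internal integer enum here, not
# the string norm= argument of the public API.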
def _fft_c2c(input: Tensor, dim: Sequence[Union[_int, SymInt]], normalization: _int, forward: _bool, *, out: Optional[Tensor]=None) -> Tensor: ...
def _fft_c2r(input: Tensor, dim: _size, normalization: _int, last_dim_size: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
def _fft_r2c(input: Tensor, dim: _size, normalization: _int, onesided: _bool, *, out: Optional[Tensor]=None) -> Tensor: ...
def _foobar(input: Tensor, arg1: _bool=True, arg2: _bool=True, *, arg3: _bool=True) -> Tensor: ...
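# The _foreach_* functions apply one operation across a whole list of tensors
# in a single fused call; they back the multi-tensor code paths in torch.optim.
# Variants without a trailing underscore return new tensors; those with one
# mutate the inputs. Illustrative usage (not part of the stub):
#   xs = [torch.ones(2), torch.ones(3)]
#   torch._foreach_add_(xs, 1.0)  # every element of every tensor becomes 2.0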
def _foreach_abs(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_abs_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_acos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_acos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
@overload
def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
@overload
def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> List[Tensor]: ...
@overload
def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
@overload
def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> None: ...
@overload
def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
@overload
def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> List[Tensor]: ...
@overload
def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> List[Tensor]: ...
@overload
def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
@overload
def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ...
@overload
def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> None: ...
@overload
def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
@overload
def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> List[Tensor]: ...
@overload
def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> List[Tensor]: ...
@overload
def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
@overload
def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ...
@overload
def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Number=1) -> None: ...
def _foreach_asin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_asin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_atan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_atan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_ceil(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_ceil_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
@overload
def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
@overload
def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
@overload
def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
@overload
def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
@overload
def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
@overload
def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
@overload
def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
@overload
def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_cos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_cos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_cosh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_cosh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
@overload
def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
@overload
def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
@overload
def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
@overload
def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_erf(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_erf_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_erfc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_erfc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_exp(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_exp_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_expm1(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_expm1_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_floor(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_floor_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_frac(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_frac_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Number) -> List[Tensor]: ...
@overload
def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
@overload
def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Number) -> None: ...
@overload
def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_lgamma(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_lgamma_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_log(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_log10(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_log10_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_log1p(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_log1p_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_log2(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_log2_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_log_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
@overload
def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
@overload
def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
@overload
def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
@overload
def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
@overload
def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
@overload
def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
@overload
def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
@overload
def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
@overload
def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
@overload
def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
@overload
def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_neg(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_neg_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_norm(self: Union[Tuple[Tensor, ...], List[Tensor]], ord: Number=2) -> List[Tensor]: ...
def _foreach_reciprocal(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_reciprocal_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_round(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_round_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_sigmoid(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_sigmoid_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_sin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_sin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_sinh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_sinh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_sqrt(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_sqrt_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
@overload
def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> List[Tensor]: ...
@overload
def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> List[Tensor]: ...
@overload
def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> List[Tensor]: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Number) -> None: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Number]) -> None: ...
@overload
def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Number=1) -> None: ...
def _foreach_tan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_tan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_tanh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_tanh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_trunc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> List[Tensor]: ...
def _foreach_trunc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _foreach_zero_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ...
def _from_functional_tensor(t: Tensor) -> Tensor: ...
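# Single-kernel Adam/AdamW updates used when torch.optim.Adam or AdamW is
# constructed with fused=True; all tensor lists must correspond element-wise.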
def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor]=None, found_inf: Optional[Tensor]=None) -> None: ...
def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor]=None, found_inf: Optional[Tensor]=None) -> None: ...
def _fused_dropout(input: Tensor, p: _float, generator: Optional[Generator]=None) -> Tuple[Tensor, Tensor]: ...
def _fused_moving_avg_obs_fq_helper(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool=False, symmetric_quant: _bool=False) -> torch.return_types._fused_moving_avg_obs_fq_helper: ...
def _fused_sdp_choice(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor]=None, dropout_p: _float=0.0, is_causal: _bool=False) -> _int: ...
def _fw_primal_copy(input: Tensor, level: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
def _grid_sampler_2d_cpu_fallback(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ...
def _has_compatible_shallow_copy_type(input: Tensor, from_: Tensor) -> _bool: ...
def _histogramdd_bin_edges(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]]=None, weight: Optional[Tensor]=None, density: _bool=False) -> List[Tensor]: ...
def _histogramdd_from_bin_cts(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]]=None, weight: Optional[Tensor]=None, density: _bool=False) -> Tensor: ...
def _histogramdd_from_bin_tensors(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], *, weight: Optional[Tensor]=None, density: _bool=False) -> Tensor: ...
def _index_put_impl_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool=False, unsafe: _bool=False) -> Tensor: ...
def _indices_copy(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor: ...
def _is_all_true(input: Tensor) -> Tensor: ...
def _is_any_true(input: Tensor) -> Tensor: ...
def _is_functional_tensor(t: Tensor) -> _bool: ...
def _is_zerotensor(input: Tensor) -> _bool: ...
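# Internal torch.linalg helpers. _linalg_check_errors raises a descriptive
# error when a LAPACK-style `info` tensor is nonzero; the _linalg_* functions
# below return named tuples from torch.return_types.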
def _linalg_check_errors(info: Tensor, api_name: str, *, is_matrix: _bool) -> None: ...
def _linalg_det(A: Tensor, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types._linalg_det: ...
def _linalg_eigh(A: Tensor, UPLO: str="L", compute_v: _bool=True, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types._linalg_eigh: ...
def _linalg_slogdet(A: Tensor, *, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types._linalg_slogdet: ...
def _linalg_solve_ex(A: Tensor, B: Tensor, *, left: _bool=True, check_errors: _bool=False, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types._linalg_solve_ex: ...
def _linalg_svd(A: Tensor, full_matrices: _bool=False, compute_uv: _bool=True, *, driver: Optional[str]=None, out: Optional[Union[Tensor, Tuple[Tensor, ...], List[Tensor]]]=None) -> torch.return_types._linalg_svd: ...
def _log_softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor]=None) -> Tensor: ...
def _log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, out: Optional[Tensor]=None) -> Tensor: ...
def _logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
def _lstm_mps(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: ...
def _lu_with_info(input: Tensor, pivot: _bool=True, check_errors: _bool=True) -> torch.return_types._lu_with_info: ...
def _make_dual(primal: Tensor, tangent: Tensor, level: _int) -> Tensor: ...
def _make_dual_copy(primal: Tensor, tangent: Tensor, level: _int, *, out: Optional[Tensor]=None) -> Tensor: ...
def _make_per_channel_quantized_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int) -> Tensor: ...
def _make_per_tensor_quantized_tensor(input: Tensor, scale: _float, zero_point: _int) -> Tensor: ...
def _masked_scale(input: Tensor, mask: Tensor, scale: _float) -> Tensor: ...
def _masked_softmax(input: Tensor, mask: Tensor, dim: Optional[_int]=None, mask_type: Optional[_int]=None) -> Tensor: ...
def _mkldnn_reshape(input: Tensor, shape: _size) -> Tensor: ...
def _mkldnn_transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
def _mkldnn_transpose_(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ...
def _mps_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: _size, stride: _size, dilation: _size, groups: _int) -> Tensor: ...
def _mps_convolution_transpose(input: Tensor, weight: Tensor, padding: _size, output_padding: _size, stride: _size, dilation: _size, groups: _int) -> Tensor: ...