# @generated from torch/_C/__init__.pyi.in

import torch
from torch import Tensor
from enum import Enum
from pathlib import Path
from typing import (
    Any, BinaryIO, Callable, ContextManager, Dict, Iterable, Iterator, List,
    NamedTuple, Optional, overload, Sequence, Tuple, TypeVar, Type, Union,
    Generic, Set, AnyStr)
from torch._six import inf

from torch.types import _int, _float, _bool, _dtype, _device, _qscheme, _size, _layout, Device, Number, Storage

import builtins

# This module is defined in torch/csrc/Module.cpp

from . import _nn as _nn
from . import _onnx as _onnx
from . import _VariableFunctions as _VariableFunctions

T = TypeVar('T')

# Defined in torch/csrc/Device.cpp
class device:
    type: str  # THPDevice_type
    index: _int  # THPDevice_index

    def __get__(self, instance, owner=None) -> device: ...

    # THPDevice_pynew
    @overload
    def __init__(self, device: Union[_device, _int, str]) -> None: ...

    @overload
    def __init__(self, type: str, index: _int) -> None: ...

    def __reduce__(self) -> Tuple[Any, ...]: ...  # THPDevice_reduce
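
# Usage sketch (illustrative comment, not part of the generated stub): the two
# __init__ overloads above map onto the two call spellings.
#   torch.device("cuda:1")        # parsed from a single string
#   torch.device("cuda", 1)       # type and index given separately
#   torch.device("cuda", 1).type  # "cuda"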

# Defined in torch/csrc/Stream.cpp
class Stream:
    _cdata: _int  # Stream handle
    device: device  # The device of the stream

    ...

# Defined in torch/csrc/Size.cpp
class Size(Tuple[_int, ...]):
    # TODO: __reduce__

    @overload
    def __getitem__(self: Size, key: _int) -> _int: ...

    @overload
    def __getitem__(self: Size, key: slice) -> Size: ...

    def numel(self: Size) -> _int: ...

    ...
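
# Usage sketch (illustrative comment): Size is a tuple subclass, so integer
# indexing returns an _int while slicing returns another Size.
#   s = torch.empty(2, 3, 4).size()   # torch.Size([2, 3, 4])
#   s[0]                              # 2 (the _int overload)
#   s[1:]                             # torch.Size([3, 4]) (the slice overload)
#   s.numel()                         # 24, the product of all entries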

# Defined in torch/csrc/Dtype.cpp
class dtype:
    # TODO: __reduce__
    is_floating_point: _bool
    is_complex: _bool
    is_signed: _bool
    ...

# Defined in torch/csrc/TypeInfo.cpp
class iinfo:
    bits: _int
    min: _int
    max: _int
    dtype: str

    def __init__(self, dtype: _dtype) -> None: ...

class finfo:
    bits: _int
    min: _float
    max: _float
    eps: _float
    tiny: _float
    resolution: _float
    dtype: str

    @overload
    def __init__(self, dtype: _dtype) -> None: ...

    @overload
    def __init__(self) -> None: ...
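
# Usage sketch (illustrative comment): iinfo/finfo expose the numeric limits
# of integer and floating-point dtypes; the no-argument finfo overload uses
# the current default floating-point dtype.
#   torch.iinfo(torch.int32).max     # 2147483647
#   torch.finfo(torch.float32).eps   # 1.1920928955078125e-07
#   torch.finfo().bits               # 32 while the default dtype is float32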

float32: dtype = ...
float: dtype = ...
float64: dtype = ...
double: dtype = ...
float16: dtype = ...
bfloat16: dtype = ...
half: dtype = ...
uint8: dtype = ...
int8: dtype = ...
int16: dtype = ...
short: dtype = ...
int32: dtype = ...
int: dtype = ...
int64: dtype = ...
long: dtype = ...
complex32: dtype = ...
complex64: dtype = ...
cfloat: dtype = ...
complex128: dtype = ...
cdouble: dtype = ...
quint8: dtype = ...
qint8: dtype = ...
qint32: dtype = ...
bool: dtype = ...
quint4x2: dtype = ...
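
# Usage sketch (illustrative comment): several names above are aliases for the
# same dtype singleton, and the flags declared on `dtype` can be read directly.
#   torch.float is torch.float32      # True
#   torch.float32.is_floating_point   # True
#   torch.complex64.is_complex        # True
#   torch.uint8.is_signed             # False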

# Defined in torch/csrc/Layout.cpp
class layout:
    ...

# Defined in torch/csrc/utils/disable_torch_function.cpp
def DisableTorchFunction(): ...

# Defined in torch/csrc/utils/tensor_layouts.cpp
strided: layout = ...
sparse_coo: layout = ...
_mkldnn: layout = ...

# Defined in torch/csrc/MemoryFormat.cpp
class memory_format: ...

# Defined in torch/csrc/utils/tensor_memoryformats.cpp
contiguous_format: memory_format = ...
channels_last: memory_format = ...
channels_last_3d: memory_format = ...
preserve_format: memory_format = ...
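
# Usage sketch (illustrative comment): a memory format changes the physical
# layout of a tensor without changing its logical shape.
#   x = torch.randn(1, 3, 8, 8)
#   y = x.contiguous(memory_format=torch.channels_last)
#   y.is_contiguous(memory_format=torch.channels_last)   # True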

# Defined in torch/csrc/QScheme.cpp
class qscheme: ...

# Defined in torch/csrc/utils/tensor_qschemes.h
per_tensor_affine: qscheme = ...
per_channel_affine: qscheme = ...
per_tensor_symmetric: qscheme = ...
per_channel_symmetric: qscheme = ...
per_channel_affine_float_qparams: qscheme = ...
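
# Usage sketch (illustrative comment): a quantized tensor reports which of the
# qscheme singletons above it was created with.
#   q = torch.quantize_per_tensor(torch.randn(4), scale=0.1, zero_point=0,
#                                 dtype=torch.quint8)
#   q.qscheme()   # torch.per_tensor_affine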

# Defined in torch/csrc/autograd/python_function.cpp
class _FunctionBase(object):
    ...

# Defined in torch/csrc/autograd/python_legacy_variable.cpp
class _LegacyVariableBase(object):
    def __init__(
        self,
        data: Optional[Tensor] = ...,
        requires_grad: Optional[_bool] = ...,
        volatile: Optional[_bool] = ...,
        _grad_fn: Optional[_FunctionBase] = ...
    ) -> None: ...

# Defined in torch/csrc/jit/python/init.cpp
class IODescriptor: ...

class JITException: ...

class Future(object):
    def __init__(self) -> None: ...
    def done(self) -> _bool: ...
    def wait(self) -> Any: ...
    def add_done_callback(self, callback: Callable) -> None: ...
    def then(self, callback: Callable) -> Future: ...
    def set_result(self, result: Any) -> None: ...
    def _set_unwrap_func(self, callback: Callable) -> None: ...
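
# Usage sketch (illustrative comment): Future instances are normally produced
# by torch.jit.fork, a public wrapper over the fork/wait bindings below.
#   def double(t: Tensor) -> Tensor:
#       return t * 2
#   fut = torch.jit.fork(double, torch.ones(3))
#   fut.done()            # may still be False while the task runs
#   torch.jit.wait(fut)   # blocks; returns tensor([2., 2., 2.])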

def _jit_set_num_profiled_runs(num: _size) -> _size: ...

# Defined in torch/csrc/jit/passes/xnnpack_rewrite.h
class MobileOptimizerType:
    ...

CONV_BN_FUSION: MobileOptimizerType
INSERT_FOLD_PREPACK_OPS: MobileOptimizerType
REMOVE_DROPOUT: MobileOptimizerType
FUSE_ADD_RELU: MobileOptimizerType
HOIST_CONV_PACKED_PARAMS: MobileOptimizerType
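
# Usage sketch (illustrative comment): these values form the optimization
# blocklist accepted by torch.utils.mobile_optimizer.optimize_for_mobile,
# which dispatches to _jit_pass_optimize_for_mobile below; scripted_module
# stands in for any torch.jit.ScriptModule.
#   from torch.utils.mobile_optimizer import optimize_for_mobile
#   opt = optimize_for_mobile(scripted_module,
#                             optimization_blocklist={REMOVE_DROPOUT})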

def fork(*args: Any, **kwargs: Any) -> Future: ...
def wait(fut: Future) -> Any: ...
def _collect_all(futures: List[Future]) -> Future: ...

def unify_type_list(types: List[JitType]) -> JitType: ...
def _freeze_module(module: ScriptModule,
                   preserved_attrs: List[str] = [],
                   freeze_interfaces: _bool = True,
                   preserveParameters: _bool = True) -> ScriptModule: ...
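
# Usage sketch (illustrative comment): user code reaches _freeze_module
# through the public torch.jit.freeze wrapper on an eval-mode ScriptModule;
# MyModule is a placeholder for any nn.Module.
#   m = torch.jit.script(MyModule()).eval()
#   frozen = torch.jit.freeze(m, preserved_attrs=["version"])
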
def _jit_pass_optimize_frozen_graph(graph: Graph) -> None: ...
def _jit_pass_fold_frozen_conv_bn(graph: Graph): ...
def _jit_pass_fold_frozen_conv_add_or_sub(graph: Graph): ...
def _jit_pass_fold_frozen_conv_mul_or_div(graph: Graph): ...
def _jit_pass_remove_dropout(module: 'torch.jit.ScriptModule'): ...

def _is_tracing() -> _bool: ...
def _jit_init() -> _bool: ...
def _jit_flatten(arg: Any) -> Tuple[List[Tensor], IODescriptor]: ...
def _jit_unflatten(vars: List[Tensor], desc: IODescriptor) -> Any: ...
def _jit_get_operation(op_name: str) -> Callable: ...
def _jit_pass_optimize_for_mobile(module: 'torch.jit.ScriptModule',
                                  optimization_blocklist: Set[MobileOptimizerType],
                                  preserved_methods: List[AnyStr]) -> 'torch.jit.ScriptModule': ...
def _jit_pass_vulkan_optimize_for_mobile(module: 'torch.jit.ScriptModule',
                                         preserved_methods: List[AnyStr]) -> 'torch.jit.ScriptModule': ...
def _jit_pass_metal_optimize_for_mobile(module: 'torch.jit.ScriptModule',
                                        preserved_methods: List[AnyStr]) -> 'torch.jit.ScriptModule': ...
def _jit_pass_inline(graph: Graph) -> None: ...
def _jit_pass_constant_propagation(graph: Graph) -> None: ...
def _jit_get_schemas_for_operator(name: str) -> List[FunctionSchema]: ...
def _jit_check_alias_annotation(g: Graph, args: Tuple[Any, ...], unqualified_op_name: str): ...
def _jit_can_fuse_on_cpu() -> _bool: ...
def _jit_can_fuse_on_gpu() -> _bool: ...
def _debug_get_fusion_group_inlining() -> _bool: ...
def _debug_set_fusion_group_inlining(enable: _bool): ...
def _jit_texpr_fuser_enabled() -> _bool: ...
def _jit_nvfuser_enabled() -> _bool: ...
def _llvm_enabled() -> _bool: ...
def _jit_override_can_fuse_on_cpu(override: _bool): ...
def _jit_override_can_fuse_on_gpu(override: _bool): ...
def _jit_set_texpr_fuser_enabled(enable: _bool): ...
def _jit_set_nvfuser_enabled(enable: _bool) -> _bool: ...
def _jit_pass_canonicalize(graph: Graph): ...
def _jit_pass_erase_shape_information(graph: Graph): ...
def _jit_pass_fold_convbn(module: 'torch.jit.ScriptModule'): ...
def _jit_pass_insert_observers(module: 'torch.jit.ScriptModule',
                               method_name: str,
                               qconfig_dict: Dict[str, Any],
                               inplace: _bool,
                               quant_type: _int): ...
def _jit_pass_insert_quant_dequant(module: 'torch.jit.ScriptModule',
                                   method_name: str,
                                   inplace: _bool,
                                   debug: _bool,
                                   quant_type: _int): ...
def _jit_pass_quant_finalize(module: 'torch.jit.ScriptModule',
                             quant_type: _int,
                             preserved_attrs: Sequence[str]): ...
def _jit_set_profiling_executor(profiling_flag: _bool) -> _bool: ...
def _jit_set_profiling_mode(profiling_flag: _bool) -> _bool: ...
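
# Usage sketch (illustrative comment): both setters above return the previous
# flag, which makes a save/restore pattern possible around a region of code.
#   prev = torch._C._jit_set_profiling_executor(False)
#   try:
#       pass   # run with the legacy executor here
#   finally:
#       torch._C._jit_set_profiling_executor(prev)
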
def _jit_try_infer_type(obj: Any) -> InferredType: ...
def _jit_get_trigger_value(trigger_name: str) -> _int: ...

# Defined in torch/csrc/jit/python/script_init.cpp
ResolutionCallback = Callable[[str], Callable[..., Any]]

# Defined in torch/csrc/jit/python/script_init.cpp
#        and torch/csrc/jit/python/init.cpp
def _create_function_from_graph(qualname: str, graph: Graph) -> Graph: ...
def _debug_set_autodiff_subgraph_inlining(disabled: _bool) -> None: ...
def _ivalue_tags_match(lhs: ScriptModule, rhs: ScriptModule) -> _bool: ...
def _jit_assert_is_instance(obj: Any, type: JitType): ...
def _jit_clear_class_registry() -> None: ...
def _jit_set_emit_hooks(ModuleHook: Optional[Callable], FunctionHook: Optional[Callable]) -> None: ...
def _jit_get_emit_hooks() -> Tuple[Callable, Callable]: ...
def _load_for_lite_interpreter(filename: Union[str, Path], map_location: Union[_device, str, None]): ...
def _load_for_lite_interpreter_from_buffer(buffer: BinaryIO, map_location: Union[_device, str, None]): ...
def _logging_set_logger(logger: LoggerBase) -> LoggerBase: ...
def _get_graph_executor_optimize() -> _bool: ...
def _set_graph_executor_optimize(optimize: _bool): ...
def _export_opnames(module: ScriptModule) -> List[str]: ...
def _create_function_from_trace(
    qualname: str,
    func: Callable[..., Any],
    input_tuple: Tuple[Any, ...],
    var_lookup_fn: Callable[[Tensor], str],
    strict: _bool,
    force_outplace: _bool
) -> Tuple[Graph, Stack]: ...
def _jit_is_script_object(obj: Any) -> _bool: ...
def _last_executed_optimized_graph() -> Graph: ...
def parse_type_comment(comment: str) -> Decl: ...
def merge_type_from_type_comment(decl: Decl, type_annotation_decl: Decl, is_method: _bool) -> Decl: ...
def parse_ir(input: str) -> Graph: ...
def parse_schema(schema: str) -> FunctionSchema: ...
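
# Usage sketch (illustrative comment): parse_ir and parse_schema accept the
# textual forms printed by the JIT.
#   g = torch._C.parse_ir("graph(%x : Tensor):\n  return (%x)")
#   s = torch._C.parse_schema(
#       "aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor")
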
def get_device(input: Tensor) -> _int: ...
def _resolve_type_from_object(obj: Any, range: SourceRange, rcb: ResolutionCallback) -> JitType: ...
def _create_module_with_type(ty: JitType) -> ScriptModule: ...
def _run_emit_module_hook(m: ScriptModule): ...
def _replace_overloaded_method_decl(overload_decl: Decl, implementation_def: Def, new_name: str) -> Def: ...

def _jit_pass_lower_all_tuples(graph: Graph) -> None: ...
def _jit_pass_onnx_set_dynamic_input_shape(graph: Graph, dynamic_axes: Dict[str, Dict[_int, str]], input_names: List[str]) -> None: ...
def _jit_pass_onnx_graph_shape_type_inference(graph: Graph, paramsDict: Dict[str, IValue], opset_version: _int) -> None: ...
def _jit_pass_onnx_assign_output_shape(graph: Graph, tensors: List[Tensor], desc: IODescriptor, onnx_shape_inference: _bool = False) -> None: ...
def _jit_pass_onnx_remove_inplace_ops_for_onnx(graph: Graph, module: Module) -> None: ...
def _jit_pass_remove_inplace_ops(graph: Graph) -> None: ...
def _jit_pass_canonicalize_graph_fuser_ops(graph: Graph) -> None: ...
def _jit_pass_peephole(graph: Graph, addmm_fusion_enabled: _bool) -> None: ...
def _jit_pass_fuse_addmm(graph: Graph) -> None: ...
def _jit_pass_onnx_preprocess(graph: Graph) -> None: ...
def _jit_pass_onnx_prepare_inplace_ops_for_onnx(graph: Graph) -> None: ...
def _jit_pass_prepare_division_for_onnx(graph: Graph) -> None: ...
def _jit_pass_onnx_remove_print(graph: Graph) -> None: ...
def _jit_pass_onnx_preprocess_caffe2(graph: Graph) -> None: ...
def _jit_pass_onnx_unpack_quantized_weights(
    graph: Graph,
    paramsDict: Dict[str, IValue]
) -> Dict[str, IValue]: ...
def _jit_pass_onnx_quantization_insert_permutes(
    graph: Graph,
    paramsDict: Dict[str, IValue]
) -> Dict[str, IValue]: ...
def _jit_pass_custom_pattern_based_rewrite_graph(pattern: str, fused_node_name: str, graph: Graph) -> None: ...
def _jit_onnx_list_model_parameters(module: ScriptModule) -> Tuple[ScriptModule, List[IValue]]: ...
def _jit_pass_erase_number_types(graph: Graph) -> None: ...
def _jit_pass_onnx(graph: Graph, operator_export_type: _onnx.OperatorExportTypes) -> Graph: ...
def _jit_pass_onnx_scalar_type_analysis(graph: Graph) -> None: ...
def _jit_pass_onnx_peephole(graph: Graph, opset_version: _int, fixed_batch_size: _bool) -> None: ...
def _jit_pass_dce_allow_deleting_nodes_with_side_effects(graph: Graph) -> None: ...
def _jit_pass_onnx_function_substitution(graph: Graph) -> None: ...
def _jit_pass_onnx_fold_if(graph: Graph) -> None: ...
def _jit_pass_lower_graph(graph: Graph, m: Module) -> Tuple[Graph, List[IValue]]: ...
def _jit_pass_inline_fork_wait(graph: Graph) -> None: ...
def _jit_pass_onnx_eval_peephole(graph: Graph, paramsDict: Dict[str, IValue]) -> Dict[str, IValue]: ...
def _jit_pass_onnx_constant_fold(graph: Graph, paramsDict: Dict[str, IValue], opset_version: _int) -> Dict[str, IValue]: ...
def _jit_pass_onnx_eliminate_unused_items(graph: Graph, paramsDict: Dict[str, IValue]) -> Dict[str, IValue]: ...
def _jit_pass_onnx_cast_all_constant_to_floating(graph: Graph) -> None: ...
def _jit_pass_filter_non_tensor_arguments(params: Dict[str, IValue]) -> Dict[str, Tensor]: ...
def _jit_decay_packed_param_input_types(graph: Graph) -> None: ...
def _jit_pass_onnx_node_shape_type_inference(n: Node, paramsDict: Dict[str, IValue], opset_version: _int) -> None: ...
def _jit_pass_onnx_block(
    old_block: Block,
    new_block: Block,
    operator_export_type: _onnx.OperatorExportTypes,
    env: Dict[Value, Value]
) -> None: ...
def _jit_pass_fixup_onnx_controlflow_node(n: Node, opset_version: _int) -> Node: ...

def _jit_script_interface_compile(name: str, class_def: ClassDef, rcb: ResolutionCallback, is_module: _bool): ...
def _jit_script_compile_overload(
    qualname: str,
    overload_decl: Decl,
    implementation_def: Def,
    rcb: ResolutionCallback,