# @generated from torch/_C/__init__.pyi.in

import torch
from torch.package import PackageExporter
from torch import Tensor, inf
from torch.autograd.graph import Node as _Node
from enum import Enum
from pathlib import Path
from typing import (
    Any, BinaryIO, Callable, ContextManager, Dict, Iterable, Iterator, List,
    NamedTuple, Optional, overload, Sequence, Tuple, TypeVar, Type, Union,
    Literal, Generic, Set, AnyStr)

from torch.types import (
    _int, _float, _bool, _dtype, _device, _qscheme, _size, _layout, Device, Number, Storage, SymInt, _dispatchkey
)
from torch.storage import TypedStorage

import builtins

# This module is defined in torch/csrc/Module.cpp

from . import _nn as _nn
from . import _onnx as _onnx
from . import _VariableFunctions as _VariableFunctions
from . import _functorch as _functorch
from . import _lazy as _lazy
from . import _lazy_ts_backend as _lazy_ts_backend

T = TypeVar('T')
S = TypeVar("S", bound="torch.Tensor")

# Defined in torch/csrc/Device.cpp
class device:
    type: str  # THPDevice_type
    index: _int  # THPDevice_index

    def __get__(self, instance, owner=None) -> device: ...

    # THPDevice_pynew
    @overload
    def __init__(self, device: Union[_device, _int, str]) -> None: ...

    @overload
    def __init__(self, type: str, index: _int) -> None: ...

    # Uncomment if we ever make torch.device a decorator
    # def __call__(self, func: T) -> T: ...

    def __enter__(self) -> "device": ...

    def __exit__(self, exc_type, exc_val, exc_tb) -> None: ...

    def __reduce__(self) -> Tuple[Any, ...]: ...  # THPDevice_reduce
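
# Illustrative usage (editor's sketch, not part of the stub; assumes the
# public torch API). Both __init__ overloads denote the same device, and the
# context-manager form sets the default device for newly created tensors:
#
#   import torch
#   d1 = torch.device("cuda:1")   # single-argument overload
#   d2 = torch.device("cuda", 1)  # (type, index) overload; d1 == d2
#   with torch.device("cpu"):     # __enter__/__exit__ set the default device
#       t = torch.empty(3)        # allocated on the CPU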

# Defined in torch/csrc/Stream.cpp
class Stream:
    stream_id: _int  # Stream id
    device_index: _int
    device_type: _int

    device: device  # The device of the stream

    ...

# Defined in torch/csrc/Size.cpp
class Size(Tuple[_int, ...]):
    # TODO: __reduce__

    @overload  # type: ignore[override]
    def __getitem__(self: Size, key: _int) -> _int: ...

    @overload
    def __getitem__(self: Size, key: slice) -> Size: ...

    def numel(self: Size) -> _int: ...

    ...
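
# Illustrative usage (editor's sketch, not part of the stub): integer indexing
# returns a plain int, slicing returns another Size, and numel() is the
# product of all entries:
#
#   import torch
#   s = torch.empty(2, 3, 4).shape  # torch.Size([2, 3, 4])
#   s[0]                            # -> 2
#   s[1:]                           # -> torch.Size([3, 4])
#   s.numel()                       # -> 24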

# Defined in torch/csrc/Dtype.cpp
class dtype:
    # TODO: __reduce__
    is_floating_point: _bool
    is_complex: _bool
    is_signed: _bool
    ...

# Defined in torch/csrc/TypeInfo.cpp
class iinfo:
    bits: _int
    min: _int
    max: _int
    dtype: str

    def __init__(self, dtype: _dtype) -> None: ...

class finfo:
    bits: _int
    min: _float
    max: _float
    eps: _float
    tiny: _float
    smallest_normal: _float
    resolution: _float
    dtype: str

    @overload
    def __init__(self, dtype: _dtype) -> None: ...

    @overload
    def __init__(self) -> None: ...
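
# Illustrative usage (editor's sketch, not part of the stub): iinfo/finfo
# mirror their numpy counterparts and report per-dtype numeric limits; the
# zero-argument finfo() overload uses the default floating-point dtype:
#
#   import torch
#   torch.iinfo(torch.int8).max      # -> 127
#   torch.finfo(torch.float16).eps   # -> 0.0009765625
#   torch.finfo().dtype              # -> 'float32' (the default dtype)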

float32: dtype = ...
float: dtype = ...
float64: dtype = ...
double: dtype = ...
float16: dtype = ...
bfloat16: dtype = ...
half: dtype = ...
uint8: dtype = ...
int8: dtype = ...
int16: dtype = ...
short: dtype = ...
int32: dtype = ...
int: dtype = ...
int64: dtype = ...
long: dtype = ...
complex32: dtype = ...
complex64: dtype = ...
cfloat: dtype = ...
complex128: dtype = ...
cdouble: dtype = ...
quint8: dtype = ...
qint8: dtype = ...
qint32: dtype = ...
bool: dtype = ...
quint4x2: dtype = ...
quint2x4: dtype = ...
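
# Illustrative note (editor's sketch, not part of the stub): several of the
# names above are aliases of one another, and the flags declared on the dtype
# class can be read off any of them:
#
#   import torch
#   torch.float is torch.float32    # -> True (half, double, short, long, ...
#   torch.long is torch.int64       # -> True  are aliases in the same way)
#   torch.complex64.is_complex      # -> True
#   torch.uint8.is_signed           # -> False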

# Defined in torch/csrc/Layout.cpp
class layout:
    ...

# Defined in torch/csrc/utils/disable_torch_function.cpp
def DisableTorchFunction(): ...
def DisableTorchFunctionSubclass(): ...

# Defined in torch/csrc/utils/tensor_layouts.cpp
strided: layout = ...
sparse_coo: layout = ...
sparse_csr: layout = ...
sparse_csc: layout = ...
sparse_bsr: layout = ...
sparse_bsc: layout = ...
_mkldnn: layout = ...
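
# Illustrative usage (editor's sketch, not part of the stub): every tensor
# reports one of the layouts above, and the sparse constructors produce the
# matching sparse layouts:
#
#   import torch
#   torch.empty(2, 2).layout                       # -> torch.strided
#   i = torch.tensor([[0, 1], [1, 0]])             # COO indices, shape (ndim, nnz)
#   v = torch.tensor([1.0, 2.0])
#   torch.sparse_coo_tensor(i, v, (2, 2)).layout   # -> torch.sparse_coo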

# Defined in torch/csrc/MemoryFormat.cpp
class memory_format: ...

# Defined in torch/csrc/utils/tensor_memoryformats.cpp
contiguous_format: memory_format = ...
channels_last: memory_format = ...
channels_last_3d: memory_format = ...
preserve_format: memory_format = ...
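
# Illustrative usage (editor's sketch, not part of the stub): memory formats
# change the physical stride order of a 4-d tensor without changing its
# logical shape:
#
#   import torch
#   x = torch.empty(1, 3, 8, 8)                          # NCHW, contiguous
#   y = x.to(memory_format=torch.channels_last)          # NHWC strides
#   y.is_contiguous(memory_format=torch.channels_last)   # -> True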

# Defined in torch/csrc/QScheme.cpp
class qscheme: ...

# Defined in torch/csrc/utils/tensor_qschemes.h
per_tensor_affine: qscheme = ...
per_channel_affine: qscheme = ...
per_tensor_symmetric: qscheme = ...
per_channel_symmetric: qscheme = ...
per_channel_affine_float_qparams: qscheme = ...
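
# Illustrative usage (editor's sketch, not part of the stub): a quantized
# tensor reports the qscheme it was constructed with:
#
#   import torch
#   q = torch.quantize_per_tensor(torch.randn(4), scale=0.1, zero_point=0,
#                                 dtype=torch.quint8)
#   q.qscheme()   # -> torch.per_tensor_affine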

# Defined in torch/csrc/autograd/python_function.cpp
class _FunctionBase:
    ...

# Defined in torch/csrc/autograd/python_legacy_variable.cpp
class _LegacyVariableBase(Tensor):  # inherits from Tensor to appease mypy
    def __init__(
        self,
        data: Optional[Tensor]=...,
        requires_grad: Optional[_bool]=...,
        volatile: Optional[_bool]=...,
        _grad_fn: Optional[_FunctionBase]=...
    ) -> None: ...

# Defined in torch/csrc/jit/python/init.cpp
class IODescriptor: ...

class JITException: ...

class Future:
    def __init__(self, devices: List[device]) -> None: ...
    def done(self) -> _bool: ...
    def value(self) -> Any: ...
    def wait(self) -> Any: ...
    def add_done_callback(self, callback: Callable) -> None: ...
    def then(self, callback: Callable) -> Future: ...
    def set_result(self, result: Any) -> None: ...
    def _set_unwrap_func(self, callback: Callable) -> None: ...

class _Await:
    def __init__(self) -> None: ...
    def fn(self) -> Callable: ...
    def args(self) -> Tuple[Any, ...]: ...
    def is_nowait(self) -> _bool: ...

def _jit_set_num_profiled_runs(num: _size) -> _size: ...

# Defined in torch/csrc/jit/passes/mobile_optimizer_type.h
class _MobileOptimizerType:
    ...

CONV_BN_FUSION: _MobileOptimizerType
INSERT_FOLD_PREPACK_OPS: _MobileOptimizerType
REMOVE_DROPOUT: _MobileOptimizerType
FUSE_ADD_RELU: _MobileOptimizerType
HOIST_CONV_PACKED_PARAMS: _MobileOptimizerType
VULKAN_AUTOMATIC_GPU_TRANSFER: _MobileOptimizerType

def fork(*args: Any, **kwargs: Any) -> Future: ...
def wait(fut: Future) -> Any: ...
def _awaitable(*args: Any, **kwargs: Any) -> _Await: ...
def _awaitable_wait(aw: _Await) -> Any: ...
def _awaitable_nowait(x: Any) -> _Await: ...
def _collect_all(futures: List[Future]) -> Future: ...
def _set_print_stack_traces_on_fatal_signal(print: _bool) -> None: ...
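
# Illustrative usage (editor's sketch, not part of the stub): fork/wait are
# normally reached through their public wrappers torch.jit.fork and
# torch.jit.wait, which run a callable asynchronously and deliver its result
# through a Future:
#
#   import torch
#   def add(a, b):
#       return a + b
#   fut = torch.jit.fork(add, torch.ones(2), torch.ones(2))
#   fut.done()            # may be False while the task is in flight
#   torch.jit.wait(fut)   # -> tensor([2., 2.])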

def unify_type_list(types: List[JitType]) -> JitType: ...
def _freeze_module(module: ScriptModule,
                   preserved_attrs: List[str] = [],
                   freeze_interfaces: _bool = True,
                   preserveParameters: _bool = True) -> ScriptModule: ...
def _jit_pass_optimize_frozen_graph(Graph, optimize_numerics: _bool = True) -> None: ...
def _jit_pass_optimize_for_inference(module: 'torch.jit.ScriptModule',
                                     other_methods: List[str] = []) -> None: ...
def _jit_pass_fold_frozen_conv_bn(graph: Graph): ...
def _jit_pass_fold_frozen_conv_add_or_sub(graph: Graph): ...
def _jit_pass_fold_frozen_conv_mul_or_div(graph: Graph): ...
def _jit_pass_fuse_frozen_conv_add_relu(graph: Graph): ...
def _jit_pass_concat_frozen_linear(graph: Graph): ...
def _jit_pass_convert_frozen_ops_to_mkldnn(graph: Graph): ...
def _jit_pass_transpose_frozen_linear(graph: Graph): ...
def _jit_pass_remove_dropout(module: 'torch.jit.ScriptModule'): ...
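
# Illustrative usage (editor's sketch, not part of the stub): the frozen-graph
# passes above are normally driven by the public torch.jit.freeze and
# torch.jit.optimize_for_inference entry points:
#
#   import torch
#   m = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3),
#                           torch.nn.BatchNorm2d(8)).eval()
#   frozen = torch.jit.freeze(torch.jit.script(m))   # uses _freeze_module
#   opt = torch.jit.optimize_for_inference(frozen)   # runs the passes above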

def _is_tracing() -> _bool: ...
def _jit_init() -> _bool: ...
def _jit_flatten(arg: Any) -> Tuple[List[Tensor], IODescriptor]: ...
def _jit_unflatten(vars: List[Tensor], desc: IODescriptor) -> Any: ...
def _jit_get_operation(op_name: str) -> Tuple[Callable, List[str]]: ...
def _get_operation_overload(op_name: str, op_overload_name: str) -> Tuple[Callable, Callable, List[Any]]: ...
def _get_schema(op_name: str, overload_name: str) -> FunctionSchema: ...
def _jit_pass_optimize_for_mobile(module: 'torch.jit.ScriptModule',
                                  optimization_blocklist: Set[_MobileOptimizerType],
                                  preserved_methods: List[AnyStr]) -> 'torch.jit.ScriptModule': ...
def _clone_module_with_class(module: 'torch.jit.ScriptModule',
                             ignored_methods: List[AnyStr],
                             ignored_attributes: List[AnyStr]) -> 'torch.jit.ScriptModule': ...
def _jit_pass_vulkan_optimize_for_mobile(module: 'torch.jit.ScriptModule',
                                         optimization_blocklist: Set[_MobileOptimizerType],
                                         preserved_methods: List[AnyStr]) -> 'torch.jit.ScriptModule': ...
def _jit_pass_metal_optimize_for_mobile(module: 'torch.jit.ScriptModule',
                                        preserved_methods: List[AnyStr]) -> 'torch.jit.ScriptModule': ...
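
# Illustrative usage (editor's sketch, not part of the stub): the mobile
# passes above back torch.utils.mobile_optimizer.optimize_for_mobile:
#
#   import torch
#   from torch.utils.mobile_optimizer import optimize_for_mobile
#   scripted = torch.jit.script(torch.nn.Linear(4, 2).eval())
#   mobile = optimize_for_mobile(scripted)            # CPU backend by default
#   mobile._save_for_lite_interpreter("model.ptl")    # see _load_for_lite_interpreter
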
def _jit_pass_inline(Graph) -> None: ...
def _jit_pass_constant_propagation(Graph) -> None: ...
def _jit_pass_propagate_shapes_on_graph(Graph) -> None: ...
def _jit_register_decomposition_for_schema(schema: FunctionSchema, Graph) -> None: ...
def _jit_erase_non_input_shape_information(Graph) -> None: ...
def _jit_get_schemas_for_operator(name: str) -> List[FunctionSchema]: ...
def _jit_get_all_schemas() -> List[FunctionSchema]: ...
def _jit_check_alias_annotation(g: Graph, args: Tuple[Any, ...], unqualified_op_name: str): ...
def _jit_can_fuse_on_cpu() -> _bool: ...
def _jit_can_fuse_on_gpu() -> _bool: ...
def _jit_can_fuse_on_cpu_legacy() -> _bool: ...
def _debug_get_fusion_group_inlining() -> _bool: ...
def _debug_set_fusion_group_inlining(enable: _bool): ...
def _jit_texpr_fuser_enabled() -> _bool: ...
def _jit_nvfuser_enabled() -> _bool: ...
def _jit_llga_enabled() -> _bool: ...
def _jit_set_llga_enabled(enable: _bool): ...
def _llvm_enabled() -> _bool: ...
def _jit_override_can_fuse_on_cpu(override: _bool): ...
def _jit_override_can_fuse_on_gpu(override: _bool): ...
def _jit_override_can_fuse_on_cpu_legacy(override: _bool): ...
def _jit_set_symbolic_shapes_test_mode(override: _bool): ...
def _jit_symbolic_shapes_test_mode_enabled() -> _bool: ...
def _jit_set_texpr_fuser_enabled(enable: _bool): ...
def _jit_set_te_must_use_llvm_cpu(use_llvm: _bool): ...
def _jit_set_nvfuser_enabled(enable: _bool) -> _bool: ...
def _jit_cat_wo_conditionals(optimize_cat: _bool): ...
def _jit_opt_conditionals(opt_conds: _bool): ...
def _jit_pass_canonicalize(graph: Graph, keep_unique_names: _bool = True): ...
def _jit_pass_erase_shape_information(graph: Graph): ...
def _jit_pass_fold_convbn(module: 'torch.jit.ScriptModule'): ...
def _jit_pass_insert_observers(module: 'torch.jit.ScriptModule',
                               method_name: str,
                               qconfig_dict: Dict[str, Any],
                               inplace: _bool,
                               quant_type: _int): ...
def _jit_pass_insert_quant_dequant(module: 'torch.jit.ScriptModule',
                                   method_name: str,
                                   inplace: _bool,
                                   debug: _bool,
                                   quant_type: _int): ...
def _jit_pass_insert_quant_dequant_for_ondevice_ptq(module: 'torch.jit.ScriptModule',
                                                    method_name: str,
                                                    inplace: _bool,
                                                    debug: _bool,
                                                    quant_type: _int): ...
def _jit_pass_quant_finalize(module: 'torch.jit.ScriptModule',
                             quant_type: _int,
                             preserved_attrs: Sequence[str]): ...
def _jit_pass_quant_finalize_for_ondevice_ptq(module: 'torch.jit.ScriptModule',
                                              quant_type: _int,
                                              method_name: str): ...
def _jit_pass_insert_observer_method_for_ondevice_ptq(module: 'torch.jit.ScriptModule',
                                                      method_name: str,
                                                      qconfig_dict: Dict[str, Any],
                                                      inplace: _bool,
                                                      quant_type: _int): ...
def _jit_set_profiling_executor(profiling_flag: _bool) -> _bool: ...
def _jit_set_profiling_mode(profiling_flag: _bool) -> _bool: ...
def _jit_set_fusion_strategy(strategy: List[Tuple[str, _int]]) -> List[Tuple[str, _int]]: ...
def _jit_try_infer_type(obj: Any) -> InferredType: ...
def _jit_get_trigger_value(trigger_name: str) -> _int: ...
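
# Illustrative usage (editor's sketch, not part of the stub):
# _jit_set_fusion_strategy backs the public torch.jit.set_fusion_strategy,
# which caps how many specializations the profiling executor will compile:
#
#   import torch
#   # try 2 static specializations, then up to 10 dynamic ones
#   torch.jit.set_fusion_strategy([("STATIC", 2), ("DYNAMIC", 10)])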

# Defined in torch/csrc/jit/python/script_init.cpp
ResolutionCallback = Callable[[str], Callable[..., Any]]

# Defined in torch/csrc/jit/python/script_init.cpp
#        and torch/csrc/jit/python/init.cpp
def _create_function_from_graph(qualname: str, graph: Graph) -> ScriptFunction: ...
def _debug_set_autodiff_subgraph_inlining(disabled: _bool) -> None: ...
def _ivalue_tags_match(lhs: ScriptModule, rhs: ScriptModule) -> _bool: ...
def _jit_assert_is_instance(obj: Any, type: JitType): ...
def _jit_clear_class_registry() -> None: ...
def _jit_set_emit_hooks(ModuleHook: Optional[Callable], FunctionHook: Optional[Callable]) -> None: ...
def _jit_get_emit_hooks() -> Tuple[Callable, Callable]: ...
def _load_for_lite_interpreter(filename: Union[str, Path], map_location: Union[_device, str, None]): ...
def _load_for_lite_interpreter_from_buffer(buffer: BinaryIO, map_location: Union[_device, str, None]): ...
def _export_operator_list(module: LiteScriptModule): ...
def _quantize_ondevice_ptq_dynamic(module: LiteScriptModule, method_name: str): ...