#pragma once

// @generated by tools/codegen/gen.py from Functions.h

#include <c10/core/Scalar.h>
#include <ATen/Tensor.h>
#include <c10/core/Storage.h>
#include <ATen/core/Generator.h>
#include <c10/util/Deprecated.h>
#include <ATen/DeviceGuard.h>
#include <c10/core/TensorOptions.h>
#include <ATen/core/Reduction.h>
#include <c10/util/Optional.h>
#include <ATen/TensorUtils.h>
#include <ATen/Context.h>
#include <ATen/TracerMode.h>
#include <ATen/core/op_registration/hacky_wrapper_for_legacy_signatures.h>

namespace at {

// These functions are defined in ATen/Utils.cpp.
#define TENSOR(T, S)                                                          \
  TORCH_API Tensor tensor(ArrayRef<T> values, const TensorOptions& options); \
  inline Tensor tensor(                                                       \
      std::initializer_list<T> values, const TensorOptions& options) {        \
    return at::tensor(ArrayRef<T>(values), options);                          \
  }                                                                           \
  inline Tensor tensor(T value, const TensorOptions& options) {               \
    return at::tensor(ArrayRef<T>(value), options);                           \
  }                                                                           \
  inline Tensor tensor(ArrayRef<T> values) {                                  \
    return at::tensor(std::move(values), at::dtype(k##S));                    \
  }                                                                           \
  inline Tensor tensor(std::initializer_list<T> values) {                     \
    return at::tensor(ArrayRef<T>(values));                                   \
  }                                                                           \
  inline Tensor tensor(T value) {                                             \
    return at::tensor(ArrayRef<T>(value));                                    \
  }
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR)
AT_FORALL_COMPLEX_TYPES(TENSOR)
#undef TENSOR
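
// Illustrative usage of the at::tensor factory overloads declared above (a
// minimal sketch, not part of the generated declarations): the element type
// is deduced from T unless a TensorOptions argument overrides it.
//
//   at::Tensor a = at::tensor({1.0f, 2.0f, 3.0f});               // float values
//   at::Tensor b = at::tensor({1, 2, 3}, at::dtype(at::kLong));  // cast to int64
//   at::Tensor c = at::tensor(3.14);                             // single value -> 1-element tensor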

TORCH_API Tensor _cast_Byte(const Tensor & self, bool non_blocking=false);
TORCH_API Tensor _cast_Char(const Tensor & self, bool non_blocking=false);
TORCH_API Tensor _cast_Double(const Tensor & self, bool non_blocking=false);
TORCH_API Tensor _cast_Float(const Tensor & self, bool non_blocking=false);
TORCH_API Tensor _cast_Int(const Tensor & self, bool non_blocking=false);
TORCH_API Tensor _cast_Long(const Tensor & self, bool non_blocking=false);
TORCH_API Tensor _cast_Short(const Tensor & self, bool non_blocking=false);
TORCH_API Tensor _cast_Half(const Tensor & self, bool non_blocking=false);
TORCH_API Tensor _make_dual(const Tensor & primal, const Tensor & tangent, int64_t level);
TORCH_API std::tuple<Tensor,Tensor> _unpack_dual(const Tensor & dual, int64_t level);
TORCH_API std::vector<Tensor> align_tensors(TensorList tensors);
TORCH_API bool _use_cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank);
TORCH_API std::tuple<Tensor,Tensor> _cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity);
TORCH_API bool _use_cudnn_rnn_flatten_weight();
TORCH_API Tensor _cudnn_rnn_flatten_weight(TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional);
TORCH_API std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _cudnn_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const c10::optional<Tensor> & weight_buf, const Tensor & hx, const c10::optional<Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const c10::optional<Tensor> & dropout_state);
TORCH_API std::tuple<Tensor,Tensor,Tensor,std::vector<Tensor>> _cudnn_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const c10::optional<Tensor> & cx, const Tensor & output, const c10::optional<Tensor> & grad_output, const c10::optional<Tensor> & grad_hy, const c10::optional<Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const c10::optional<Tensor> & dropout_state, const Tensor & reserve, std::array<bool,4> output_mask);
TORCH_API Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, TensorOptions options);
TORCH_API Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory);
TORCH_API int64_t _debug_has_internal_overlap(const Tensor & self);
TORCH_API std::tuple<Tensor,Tensor> _fused_dropout(const Tensor & self, double p, c10::optional<Generator> generator=c10::nullopt);
TORCH_API Tensor _masked_scale(const Tensor & self, const Tensor & mask, double scale);
TORCH_API std::tuple<Tensor,Tensor> _sobol_engine_draw(const Tensor & quasi, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<ScalarType> dtype);
TORCH_API Tensor & _sobol_engine_ff_(Tensor & self, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated);
TORCH_API Tensor & _sobol_engine_scramble_(Tensor & self, const Tensor & ltm, int64_t dimension);
TORCH_API Tensor & _sobol_engine_initialize_state_(Tensor & self, int64_t dimension);
TORCH_API Tensor _reshape_from_tensor(const Tensor & self, const Tensor & shape);
TORCH_API Tensor _shape_as_tensor(const Tensor & self);
TORCH_API Tensor dropout(const Tensor & input, double p, bool train);
TORCH_API Tensor & dropout_(Tensor & self, double p, bool train);
TORCH_API Tensor feature_dropout(const Tensor & input, double p, bool train);
TORCH_API Tensor & feature_dropout_(Tensor & self, double p, bool train);
TORCH_API Tensor alpha_dropout(const Tensor & input, double p, bool train);
TORCH_API Tensor & alpha_dropout_(Tensor & self, double p, bool train);
TORCH_API Tensor feature_alpha_dropout(const Tensor & input, double p, bool train);
TORCH_API Tensor & feature_alpha_dropout_(Tensor & self, double p, bool train);
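
// Example (illustrative only) for the dropout family above: `p` is the drop
// probability, the `train` flag disables dropout when false, and the trailing
// underscore variants modify `self` in place.
//
//   at::Tensor x = at::rand({4, 8});
//   at::Tensor y = at::dropout(x, /*p=*/0.5, /*train=*/true);   // out-of-place
//   at::dropout_(x, /*p=*/0.5, /*train=*/true);                 // in-place
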
TORCH_API Tensor abs(const Tensor & self);
TORCH_API Tensor & abs_(Tensor & self);
TORCH_API Tensor & abs_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & abs_outf(const Tensor & self, Tensor & out);
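
// Note on the *_out / *_outf pairs that recur throughout this header: both
// write the result into a caller-provided tensor; "_out" takes the
// destination first, "_outf" takes it last. A minimal sketch:
//
//   at::Tensor x = at::randn({3});
//   at::Tensor out = at::empty_like(x);
//   at::abs_out(out, x);    // destination-first form
//   at::abs_outf(x, out);   // destination-last form, same effect
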
TORCH_API Tensor absolute(const Tensor & self);
TORCH_API Tensor & absolute_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & absolute_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor angle(const Tensor & self);
TORCH_API Tensor & angle_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & angle_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor view_as_real(const Tensor & self);
TORCH_API Tensor view_as_complex(const Tensor & self);
TORCH_API Tensor sgn(const Tensor & self);
TORCH_API Tensor & sgn_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & sgn_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor real(const Tensor & self);
TORCH_API Tensor imag(const Tensor & self);
TORCH_API Tensor conj(const Tensor & self);
TORCH_API Tensor & conj_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & conj_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor _conj(const Tensor & self);
TORCH_API Tensor acos(const Tensor & self);
TORCH_API Tensor & acos_(Tensor & self);
TORCH_API Tensor & acos_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & acos_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor arccos(const Tensor & self);
TORCH_API Tensor & arccos_(Tensor & self);
TORCH_API Tensor & arccos_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & arccos_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor avg_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true);
TORCH_API Tensor adaptive_avg_pool1d(const Tensor & self, IntArrayRef output_size);
TORCH_API std::tuple<Tensor,Tensor> adaptive_max_pool1d(const Tensor & self, IntArrayRef output_size);
TORCH_API Tensor add(const Tensor & self, const Tensor & other, Scalar alpha=1);
TORCH_API Tensor & add_out(Tensor & out, const Tensor & self, const Tensor & other, Scalar alpha=1);
TORCH_API Tensor & add_outf(const Tensor & self, const Tensor & other, Scalar alpha, Tensor & out);
TORCH_API Tensor _add_relu(const Tensor & self, const Tensor & other, Scalar alpha=1);
TORCH_API Tensor & _add_relu_(Tensor & self, const Tensor & other, Scalar alpha=1);
TORCH_API Tensor & _add_relu_out(Tensor & out, const Tensor & self, const Tensor & other, Scalar alpha=1);
TORCH_API Tensor & _add_relu_outf(const Tensor & self, const Tensor & other, Scalar alpha, Tensor & out);
TORCH_API Tensor add(const Tensor & self, Scalar other, Scalar alpha=1);
TORCH_API Tensor addmv(const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & addmv_(Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & addmv_out(Tensor & out, const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & addmv_outf(const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha, Tensor & out);
TORCH_API Tensor & _addmv_impl_(Tensor & self, const Tensor & self2, const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & addr_out(Tensor & out, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & addr_outf(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha, Tensor & out);
TORCH_API Tensor affine_grid_generator(const Tensor & theta, IntArrayRef size, bool align_corners);
TORCH_API Tensor affine_grid_generator_backward(const Tensor & grad, IntArrayRef size, bool align_corners);
TORCH_API Tensor all(const Tensor & self, int64_t dim, bool keepdim=false);
TORCH_API Tensor & all_out(Tensor & out, const Tensor & self, int64_t dim, bool keepdim=false);
TORCH_API Tensor & all_outf(const Tensor & self, int64_t dim, bool keepdim, Tensor & out);
TORCH_API Tensor all(const Tensor & self, Dimname dim, bool keepdim=false);
TORCH_API Tensor & all_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim=false);
TORCH_API Tensor & all_outf(const Tensor & self, Dimname dim, bool keepdim, Tensor & out);
TORCH_API bool allclose(const Tensor & self, const Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false);
TORCH_API Tensor any(const Tensor & self, int64_t dim, bool keepdim=false);
TORCH_API Tensor & any_out(Tensor & out, const Tensor & self, int64_t dim, bool keepdim=false);
TORCH_API Tensor & any_outf(const Tensor & self, int64_t dim, bool keepdim, Tensor & out);
TORCH_API Tensor any(const Tensor & self, Dimname dim, bool keepdim=false);
TORCH_API Tensor & any_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim=false);
TORCH_API Tensor & any_outf(const Tensor & self, Dimname dim, bool keepdim, Tensor & out);
TORCH_API Tensor arange(Scalar end, TensorOptions options={});
TORCH_API Tensor arange(Scalar end, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory);
TORCH_API Tensor arange(Scalar start, Scalar end, TensorOptions options={});
TORCH_API Tensor arange(Scalar start, Scalar end, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory);
TORCH_API Tensor arange(Scalar start, Scalar end, Scalar step, TensorOptions options={});
TORCH_API Tensor arange(Scalar start, Scalar end, Scalar step, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory);
TORCH_API Tensor & arange_out(Tensor & out, Scalar end);
TORCH_API Tensor & arange_outf(Scalar end, Tensor & out);
TORCH_API Tensor & arange_out(Tensor & out, Scalar start, Scalar end, Scalar step=1);
TORCH_API Tensor & arange_outf(Scalar start, Scalar end, Scalar step, Tensor & out);
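
// Illustrative arange calls (a sketch, assuming the default TensorOptions are
// acceptable): the overloads cover end-only, start/end, and start/end/step,
// each with either a TensorOptions bundle or unpacked dtype/layout/device.
//
//   at::Tensor r1 = at::arange(10);                 // 0, 1, ..., 9
//   at::Tensor r2 = at::arange(2, 10);              // 2, 3, ..., 9
//   at::Tensor r3 = at::arange(0.0, 1.0, 0.25, at::dtype(at::kFloat));
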
TORCH_API Tensor _dim_arange(const Tensor & like, int64_t dim);
TORCH_API Tensor argmax(const Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
TORCH_API Tensor & argmax_out(Tensor & out, const Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
TORCH_API Tensor & argmax_outf(const Tensor & self, c10::optional<int64_t> dim, bool keepdim, Tensor & out);
TORCH_API Tensor argmin(const Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
TORCH_API Tensor & argmin_out(Tensor & out, const Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
TORCH_API Tensor & argmin_outf(const Tensor & self, c10::optional<int64_t> dim, bool keepdim, Tensor & out);
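
// Example (illustrative) for argmax/argmin: with `dim` omitted the reduction
// runs over the flattened tensor; with keepdim=true the reduced dimension is
// kept with size 1.
//
//   at::Tensor m = at::rand({3, 5});
//   at::Tensor flat_idx = at::argmax(m);                               // index into flattened m
//   at::Tensor row_idx  = at::argmax(m, /*dim=*/1);                    // shape {3}
//   at::Tensor kept     = at::argmin(m, /*dim=*/1, /*keepdim=*/true);  // shape {3, 1}
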
TORCH_API Tensor acosh(const Tensor & self);
TORCH_API Tensor & acosh_(Tensor & self);
TORCH_API Tensor & acosh_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & acosh_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor arccosh(const Tensor & self);
TORCH_API Tensor & arccosh_(Tensor & self);
TORCH_API Tensor & arccosh_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & arccosh_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor asinh(const Tensor & self);
TORCH_API Tensor & asinh_(Tensor & self);
TORCH_API Tensor & asinh_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & asinh_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor arcsinh(const Tensor & self);
TORCH_API Tensor & arcsinh_(Tensor & self);
TORCH_API Tensor & arcsinh_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & arcsinh_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor atanh(const Tensor & self);
TORCH_API Tensor & atanh_(Tensor & self);
TORCH_API Tensor & atanh_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & atanh_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor arctanh(const Tensor & self);
TORCH_API Tensor & arctanh_(Tensor & self);
TORCH_API Tensor & arctanh_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & arctanh_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor as_strided(const Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt);
TORCH_API Tensor & as_strided_(Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt);
TORCH_API Tensor asin(const Tensor & self);
TORCH_API Tensor & asin_(Tensor & self);
TORCH_API Tensor & asin_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & asin_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor arcsin(const Tensor & self);
TORCH_API Tensor & arcsin_(Tensor & self);
TORCH_API Tensor & arcsin_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & arcsin_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor atan(const Tensor & self);
TORCH_API Tensor & atan_(Tensor & self);
TORCH_API Tensor & atan_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & atan_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor arctan(const Tensor & self);
TORCH_API Tensor & arctan_(Tensor & self);
TORCH_API Tensor & arctan_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & arctan_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor atleast_1d(const Tensor & self);
TORCH_API std::vector<Tensor> atleast_1d(TensorList tensors);
TORCH_API Tensor atleast_2d(const Tensor & self);
TORCH_API std::vector<Tensor> atleast_2d(TensorList tensors);
TORCH_API Tensor atleast_3d(const Tensor & self);
TORCH_API std::vector<Tensor> atleast_3d(TensorList tensors);
TORCH_API Tensor baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & _baddbmm_mkl_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & baddbmm_out(Tensor & out, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & baddbmm_outf(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha, Tensor & out);
TORCH_API Tensor bartlett_window(int64_t window_length, TensorOptions options={});
TORCH_API Tensor bartlett_window(int64_t window_length, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory);
TORCH_API Tensor bartlett_window(int64_t window_length, bool periodic, TensorOptions options={});
TORCH_API Tensor bartlett_window(int64_t window_length, bool periodic, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory);
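
// Note on the factory overload pairs above (and similar pairs elsewhere in
// this header): one form bundles dtype/layout/device/pin_memory into a
// TensorOptions argument, the other spells them out as optionals. An
// illustrative call, assuming a float window is wanted:
//
//   at::Tensor w = at::bartlett_window(256, /*periodic=*/false, at::dtype(at::kFloat));
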
TORCH_API Tensor batch_norm(const Tensor & input, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & bias, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled);
TORCH_API Tensor quantized_batch_norm(const Tensor & input, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & bias, const Tensor & mean, const Tensor & var, double eps, double output_scale, int64_t output_zero_point);
TORCH_API std::tuple<Tensor,Tensor,Tensor,Tensor,int64_t> _batch_norm_impl_index(const Tensor & input, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & bias, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled);
TORCH_API std::tuple<Tensor,Tensor,Tensor> _batch_norm_impl_index_backward(int64_t impl_index, const Tensor & input, const Tensor & grad_output, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, const c10::optional<Tensor> & save_mean, const c10::optional<Tensor> & save_var_transform, bool train, double eps, std::array<bool,3> output_mask, const Tensor & reservedSpace);
TORCH_API Tensor bernoulli(const Tensor & self, c10::optional<Generator> generator=c10::nullopt);
TORCH_API Tensor & bernoulli_out(Tensor & out, const Tensor & self, c10::optional<Generator> generator=c10::nullopt);
TORCH_API Tensor & bernoulli_outf(const Tensor & self, c10::optional<Generator> generator, Tensor & out);
TORCH_API Tensor bernoulli(const Tensor & self, double p, c10::optional<Generator> generator=c10::nullopt);
TORCH_API Tensor bilinear(const Tensor & input1, const Tensor & input2, const Tensor & weight, const c10::optional<Tensor> & bias);
TORCH_API Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor & binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor & binary_cross_entropy_outf(const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, int64_t reduction, Tensor & out);
TORCH_API Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor & binary_cross_entropy_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor & binary_cross_entropy_backward_outf(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, int64_t reduction, Tensor & grad_input);
TORCH_API Tensor binary_cross_entropy_with_logits(const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight={}, const c10::optional<Tensor> & pos_weight={}, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor binary_cross_entropy_with_logits_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight={}, const c10::optional<Tensor> & pos_weight={}, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor bincount(const Tensor & self, const c10::optional<Tensor> & weights={}, int64_t minlength=0);
TORCH_API Tensor bitwise_not(const Tensor & self);
TORCH_API Tensor & bitwise_not_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & bitwise_not_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor copysign(const Tensor & self, const Tensor & other);
TORCH_API Tensor & copysign_out(Tensor & out, const Tensor & self, const Tensor & other);
TORCH_API Tensor & copysign_outf(const Tensor & self, const Tensor & other, Tensor & out);
TORCH_API Tensor copysign(const Tensor & self, Scalar other);
TORCH_API Tensor logical_not(const Tensor & self);
TORCH_API Tensor & logical_not_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & logical_not_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor logical_xor(const Tensor & self, const Tensor & other);
TORCH_API Tensor & logical_xor_out(Tensor & out, const Tensor & self, const Tensor & other);
TORCH_API Tensor & logical_xor_outf(const Tensor & self, const Tensor & other, Tensor & out);
TORCH_API Tensor logical_and(const Tensor & self, const Tensor & other);
TORCH_API Tensor & logical_and_out(Tensor & out, const Tensor & self, const Tensor & other);
TORCH_API Tensor & logical_and_outf(const Tensor & self, const Tensor & other, Tensor & out);
TORCH_API Tensor logical_or(const Tensor & self, const Tensor & other);
TORCH_API Tensor & logical_or_out(Tensor & out, const Tensor & self, const Tensor & other);
TORCH_API Tensor & logical_or_outf(const Tensor & self, const Tensor & other, Tensor & out);
TORCH_API Tensor blackman_window(int64_t window_length, TensorOptions options={});
TORCH_API Tensor blackman_window(int64_t window_length, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory);
TORCH_API Tensor blackman_window(int64_t window_length, bool periodic, TensorOptions options={});
TORCH_API Tensor blackman_window(int64_t window_length, bool periodic, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory);
TORCH_API Tensor bmm(const Tensor & self, const Tensor & mat2);
TORCH_API Tensor _bmm(const Tensor & self, const Tensor & mat2, bool deterministic=false);
TORCH_API Tensor & bmm_out(Tensor & out, const Tensor & self, const Tensor & mat2);
TORCH_API Tensor & bmm_outf(const Tensor & self, const Tensor & mat2, Tensor & out);
TORCH_API Tensor & _bmm_out(Tensor & out, const Tensor & self, const Tensor & mat2, bool deterministic=false);
TORCH_API Tensor & _bmm_outf(const Tensor & self, const Tensor & mat2, bool deterministic, Tensor & out);
TORCH_API std::vector<Tensor> broadcast_tensors(TensorList tensors);
TORCH_API Tensor broadcast_to(const Tensor & self, IntArrayRef size);
TORCH_API Tensor cat(TensorList tensors, int64_t dim=0);
TORCH_API Tensor & cat_out(Tensor & out, TensorList tensors, int64_t dim=0);
TORCH_API Tensor & cat_outf(TensorList tensors, int64_t dim, Tensor & out);
TORCH_API Tensor cat(TensorList tensors, Dimname dim);
TORCH_API Tensor & cat_out(Tensor & out, TensorList tensors, Dimname dim);
TORCH_API Tensor & cat_outf(TensorList tensors, Dimname dim, Tensor & out);
TORCH_API Tensor block_diag(TensorList tensors);
TORCH_API Tensor ceil(const Tensor & self);
TORCH_API Tensor & ceil_(Tensor & self);
TORCH_API Tensor & ceil_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & ceil_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor chain_matmul(TensorList matrices);
TORCH_API std::vector<Tensor> unsafe_chunk(const Tensor & self, int64_t chunks, int64_t dim=0);
TORCH_API std::vector<Tensor> chunk(const Tensor & self, int64_t chunks, int64_t dim=0);
TORCH_API std::vector<Tensor> tensor_split(const Tensor & self, int64_t sections, int64_t dim=0);
TORCH_API std::vector<Tensor> tensor_split(const Tensor & self, IntArrayRef indices, int64_t dim=0);
TORCH_API std::vector<Tensor> tensor_split(const Tensor & self, const Tensor & tensor_indices_or_sections, int64_t dim=0);
TORCH_API Tensor clamp(const Tensor & self, c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt);
TORCH_API Tensor & clamp_(Tensor & self, c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt);
TORCH_API Tensor & clamp_out(Tensor & out, const Tensor & self, c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt);
TORCH_API Tensor & clamp_outf(const Tensor & self, c10::optional<Scalar> min, c10::optional<Scalar> max, Tensor & out);
TORCH_API Tensor clamp_max(const Tensor & self, Scalar max);
TORCH_API Tensor & clamp_max_(Tensor & self, Scalar max);
TORCH_API Tensor & clamp_max_out(Tensor & out, const Tensor & self, Scalar max);
TORCH_API Tensor & clamp_max_outf(const Tensor & self, Scalar max, Tensor & out);
TORCH_API Tensor clamp_min(const Tensor & self, Scalar min);
TORCH_API Tensor & clamp_min_(Tensor & self, Scalar min);
TORCH_API Tensor & clamp_min_out(Tensor & out, const Tensor & self, Scalar min);
TORCH_API Tensor & clamp_min_outf(const Tensor & self, Scalar min, Tensor & out);
TORCH_API Tensor clip(const Tensor & self, c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt);
TORCH_API Tensor & clip_(Tensor & self, c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt);
TORCH_API Tensor & clip_out(Tensor & out, const Tensor & self, c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt);
TORCH_API Tensor & clip_outf(const Tensor & self, c10::optional<Scalar> min, c10::optional<Scalar> max, Tensor & out);
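
// Illustrative clamp/clip usage (clip is an alias of clamp): either bound may
// be omitted by passing c10::nullopt.
//
//   at::Tensor x  = at::randn({4});
//   at::Tensor lo = at::clamp(x, /*min=*/0.0);                  // lower bound only
//   at::Tensor hi = at::clamp(x, c10::nullopt, /*max=*/1.0);    // upper bound only
//   at::Tensor bo = at::clip(x, -1.0, 1.0);                     // both bounds
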
TORCH_API bool cudnn_is_acceptable(const Tensor & self);
TORCH_API Tensor complex(const Tensor & real, const Tensor & imag);
TORCH_API Tensor & complex_out(Tensor & out, const Tensor & real, const Tensor & imag);
TORCH_API Tensor & complex_outf(const Tensor & real, const Tensor & imag, Tensor & out);
TORCH_API Tensor polar(const Tensor & abs, const Tensor & angle);
TORCH_API Tensor & polar_out(Tensor & out, const Tensor & abs, const Tensor & angle);
TORCH_API Tensor & polar_outf(const Tensor & abs, const Tensor & angle, Tensor & out);
TORCH_API Tensor constant_pad_nd(const Tensor & self, IntArrayRef pad, Scalar value=0);
TORCH_API Tensor convolution(const Tensor & input, const Tensor & weight, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups);
TORCH_API Tensor convolution_overrideable(const Tensor & input, const Tensor & weight, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups);
TORCH_API std::tuple<Tensor,Tensor,Tensor> convolution_backward_overrideable(const Tensor & grad_output, const Tensor & input, const Tensor & weight, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, std::array<bool,3> output_mask);
TORCH_API Tensor _convolution(const Tensor & input, const Tensor & weight, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32);
TORCH_API Tensor _convolution(const Tensor & input, const Tensor & weight, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled);
TORCH_API Tensor _convolution_nogroup(const Tensor & input, const Tensor & weight, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding);
TORCH_API std::tuple<Tensor,Tensor,Tensor> _convolution_double_backward(const c10::optional<Tensor> & ggI, const c10::optional<Tensor> & ggW, const c10::optional<Tensor> & ggb, const Tensor & gO, const Tensor & weight, const Tensor & self, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, std::array<bool,3> output_mask);
TORCH_API Tensor conv1d(const Tensor & input, const Tensor & weight, const c10::optional<Tensor> & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1, int64_t groups=1);
TORCH_API Tensor conv2d(const Tensor & input, const Tensor & weight, const c10::optional<Tensor> & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1, int64_t groups=1);
TORCH_API Tensor conv3d(const Tensor & input, const Tensor & weight, const c10::optional<Tensor> & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1, int64_t groups=1);
TORCH_API Tensor conv_tbc(const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad=0);
TORCH_API std::tuple<Tensor,Tensor,Tensor> conv_tbc_backward(const Tensor & self, const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t pad);
TORCH_API Tensor conv_transpose1d(const Tensor & input, const Tensor & weight, const c10::optional<Tensor> & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, int64_t groups=1, IntArrayRef dilation=1);
TORCH_API Tensor conv_transpose2d(const Tensor & input, const Tensor & weight, const c10::optional<Tensor> & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, int64_t groups=1, IntArrayRef dilation=1);
TORCH_API Tensor conv_transpose3d(const Tensor & input, const Tensor & weight, const c10::optional<Tensor> & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, int64_t groups=1, IntArrayRef dilation=1);
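
// A minimal, illustrative conv2d call (the shapes are assumptions for the
// example): input is NCHW, weight is {out_channels, in_channels/groups, kH, kW}.
//
//   at::Tensor input  = at::randn({1, 3, 32, 32});
//   at::Tensor weight = at::randn({16, 3, 3, 3});
//   at::Tensor bias   = at::randn({16});
//   at::Tensor out = at::conv2d(input, weight, bias, /*stride=*/{1, 1}, /*padding=*/{1, 1});
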
TORCH_API Tensor _copy_from(const Tensor & self, const Tensor & dst, bool non_blocking=false);
TORCH_API Tensor cos(const Tensor & self);
TORCH_API Tensor & cos_(Tensor & self);
TORCH_API Tensor & cos_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & cos_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor cosh(const Tensor & self);
TORCH_API Tensor & cosh_(Tensor & self);
TORCH_API Tensor & cosh_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & cosh_outf(const Tensor & self, Tensor & out);
TORCH_API Tensor cosine_embedding_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin=0.0, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor count_nonzero(const Tensor & self, IntArrayRef dim);
TORCH_API Tensor count_nonzero(const Tensor & self, c10::optional<int64_t> dim=c10::nullopt);
TORCH_API Tensor cudnn_affine_grid_generator(const Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W);
TORCH_API Tensor cudnn_affine_grid_generator_backward(const Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W);
TORCH_API std::tuple<Tensor,Tensor,Tensor,Tensor> cudnn_batch_norm(const Tensor & input, const Tensor & weight, const c10::optional<Tensor> & bias, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, bool training, double exponential_average_factor, double epsilon);
TORCH_API std::tuple<Tensor,Tensor,Tensor> cudnn_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, const c10::optional<Tensor> & save_mean, const c10::optional<Tensor> & save_var, double epsilon, const Tensor & reserveSpace);
TORCH_API Tensor cudnn_convolution(const Tensor & self, const Tensor & weight, const c10::optional<Tensor> & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic);
TORCH_API Tensor cudnn_convolution(const Tensor & self, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic);
TORCH_API Tensor cudnn_convolution(const Tensor & self, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32);
TORCH_API Tensor cudnn_convolution_backward_input(IntArrayRef self_size, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32);
TORCH_API std::tuple<Tensor,Tensor> cudnn_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, std::array<bool,2> output_mask);
TORCH_API Tensor cudnn_convolution_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32);
TORCH_API Tensor cudnn_convolution_transpose(const Tensor & self, const Tensor & weight, const c10::optional<Tensor> & bias, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic);
TORCH_API Tensor cudnn_convolution_transpose(const Tensor & self, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic);
TORCH_API Tensor cudnn_convolution_transpose(const Tensor & self, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32);