#pragma once
// @generated by tools/codegen/gen.py from NativeFunctions.h
#include <ATen/Context.h>
#include <ATen/MetaFunctions.h>
#include <ATen/core/Reduction.h>
#include <c10/core/ScalarType.h>
#include <c10/core/TensorOptions.h>
#include <array>
#include <functional>
#include <string>
#include <tuple>
#include <vector>
namespace c10 {
class Scalar;
}
namespace at {
struct Generator;
class Tensor;
struct Type;
} // namespace at
namespace at {
namespace native {
TORCH_API Tensor _cast_Byte(const Tensor & self, bool non_blocking=false);
TORCH_API Tensor _cast_Char(const Tensor & self, bool non_blocking=false);
TORCH_API Tensor _cast_Double(const Tensor & self, bool non_blocking=false);
TORCH_API Tensor _cast_Float(const Tensor & self, bool non_blocking=false);
TORCH_API Tensor _cast_Int(const Tensor & self, bool non_blocking=false);
TORCH_API Tensor _cast_Long(const Tensor & self, bool non_blocking=false);
TORCH_API Tensor _cast_Short(const Tensor & self, bool non_blocking=false);
TORCH_API Tensor _cast_Half(const Tensor & self, bool non_blocking=false);
TORCH_API void _backward(const Tensor & self, TensorList inputs, const Tensor & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false);
TORCH_API void set_data(Tensor & self, const Tensor & new_data);
TORCH_API Tensor data(const Tensor & self);
TORCH_API bool is_leaf(const Tensor & self);
TORCH_API int64_t output_nr(const Tensor & self);
TORCH_API int64_t _version(const Tensor & self);
TORCH_API Tensor & requires_grad_(Tensor & self, bool requires_grad=true);
TORCH_API void retain_grad(Tensor & self);
TORCH_API Tensor _fw_primal(const Tensor & self, int64_t level);
TORCH_API Tensor _make_dual(const Tensor & primal, const Tensor & tangent, int64_t level);
TORCH_API std::tuple<Tensor,Tensor> _unpack_dual(const Tensor & dual, int64_t level);
TORCH_API Tensor & rename_(Tensor & self, c10::optional<DimnameList> names);
TORCH_API Tensor rename(const Tensor & self, c10::optional<DimnameList> names);
TORCH_API Tensor align_to(const Tensor & self, DimnameList names);
TORCH_API Tensor align_to(const Tensor & self, DimnameList order, int64_t ellipsis_idx);
TORCH_API Tensor align_as(const Tensor & self, const Tensor & other);
TORCH_API std::vector<Tensor> align_tensors(TensorList tensors);
TORCH_API Tensor refine_names(const Tensor & self, DimnameList names);
TORCH_API bool _use_cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank);
TORCH_API std::tuple<Tensor,Tensor> _cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity);
TORCH_API bool _use_cudnn_rnn_flatten_weight();
TORCH_API Tensor _cudnn_rnn_flatten_weight(TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional);
TORCH_API std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _cudnn_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state);
TORCH_API std::tuple<Tensor,Tensor,Tensor,std::vector<Tensor>> _cudnn_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, const Tensor & output, const Tensor & grad_output, const Tensor & grad_hy, const Tensor & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state, const Tensor & reserve, std::array<bool,4> output_mask);
TORCH_API Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, const TensorOptions & options);
TORCH_API int64_t _debug_has_internal_overlap(const Tensor & self);
TORCH_API std::tuple<Tensor,Tensor> fused_dropout_cuda(const Tensor & self, double p, c10::optional<Generator> generator=c10::nullopt);
TORCH_API Tensor masked_scale_cuda(const Tensor & self, const Tensor & mask, double scale);
TORCH_API std::tuple<Tensor,Tensor> _sobol_engine_draw(const Tensor & quasi, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<ScalarType> dtype);
TORCH_API Tensor & _sobol_engine_ff_(Tensor & self, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated);
TORCH_API Tensor & _sobol_engine_scramble_(Tensor & self, const Tensor & ltm, int64_t dimension);
TORCH_API Tensor & _sobol_engine_initialize_state_(Tensor & self, int64_t dimension);
TORCH_API Tensor _reshape_from_tensor(const Tensor & self, const Tensor & shape);
TORCH_API Tensor _shape_as_tensor(const Tensor & self);
TORCH_API Tensor dropout(const Tensor & input, double p, bool train);
TORCH_API Tensor & dropout_(Tensor & self, double p, bool train);
TORCH_API Tensor feature_dropout(const Tensor & input, double p, bool train);
TORCH_API Tensor & feature_dropout_(Tensor & self, double p, bool train);
TORCH_API Tensor alpha_dropout(const Tensor & input, double p, bool train);
TORCH_API Tensor & alpha_dropout_(Tensor & self, double p, bool train);
TORCH_API Tensor feature_alpha_dropout(const Tensor & input, double p, bool train);
TORCH_API Tensor & feature_alpha_dropout_(Tensor & self, double p, bool train);
TORCH_API Tensor abs(const Tensor & self);
TORCH_API Tensor & abs_(Tensor & self);
TORCH_API Tensor & abs_out(Tensor & out, const Tensor & self);
TORCH_API Tensor absolute(const Tensor & self);
TORCH_API Tensor & absolute_(Tensor & self);
TORCH_API Tensor & absolute_out(Tensor & out, const Tensor & self);
TORCH_API Tensor angle(const Tensor & self);
TORCH_API Tensor & angle_out(Tensor & out, const Tensor & self);
TORCH_API Tensor view_as_real(const Tensor & self);
TORCH_API Tensor view_as_complex(const Tensor & self);
TORCH_API Tensor sgn(const Tensor & self);
TORCH_API Tensor & sgn_(Tensor & self);
TORCH_API Tensor & sgn_out(Tensor & out, const Tensor & self);
TORCH_API Tensor real(const Tensor & self);
TORCH_API Tensor imag(const Tensor & self);
TORCH_API Tensor conj(const Tensor & self);
TORCH_API Tensor & conj_out(Tensor & out, const Tensor & self);
TORCH_API Tensor _conj(const Tensor & self);
TORCH_API Tensor acos(const Tensor & self);
TORCH_API Tensor & acos_(Tensor & self);
TORCH_API Tensor & acos_out(Tensor & out, const Tensor & self);
TORCH_API Tensor arccos(const Tensor & self);
TORCH_API Tensor & arccos_(Tensor & self);
TORCH_API Tensor & arccos_out(Tensor & out, const Tensor & self);
TORCH_API Tensor avg_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true);
TORCH_API Tensor adaptive_avg_pool1d(const Tensor & self, IntArrayRef output_size);
TORCH_API std::tuple<Tensor,Tensor> adaptive_max_pool1d(const Tensor & self, IntArrayRef output_size);
struct TORCH_API structured_add_out : public at::meta::add_Tensor {
  void impl(const Tensor & self, const Tensor & other, Scalar alpha, const Tensor & out);
};
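// Editorial note, not codegen output: a structured operator declares its
// `impl` above, and a backend defines it in the corresponding kernel file,
// conventionally via the TORCH_IMPL_FUNC macro. A minimal illustrative sketch
// (assumes the iterator-based add kernel that dispatches through add_stub;
// the real definition lives in native/BinaryOps.cpp):
//
//   TORCH_IMPL_FUNC(add_out)
//   (const Tensor& self, const Tensor& other, Scalar alpha, const Tensor& out) {
//     // `*this` is the configured TensorIterator from the meta function;
//     // the stub selects the CPU/CUDA vectorized loop for this device.
//     add_stub(device_type(), *this, alpha);
//   }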
TORCH_API Tensor & add_out_sparse_cpu(const Tensor & self, const Tensor & other, Scalar alpha, Tensor & out);
TORCH_API Tensor & add_out_sparse_cuda(const Tensor & self, const Tensor & other, Scalar alpha, Tensor & out);
TORCH_API Tensor & mkldnn_add_out(const Tensor & self, const Tensor & other, Scalar alpha, Tensor & out);
TORCH_API Tensor add_sparse(const Tensor & self, const Tensor & other, Scalar alpha=1);
TORCH_API Tensor mkldnn_add(const Tensor & self, const Tensor & other, Scalar alpha=1);
TORCH_API Tensor & add_sparse_(Tensor & self, const Tensor & other, Scalar alpha=1);
TORCH_API Tensor & mkldnn_add_(Tensor & self, const Tensor & other, Scalar alpha=1);
TORCH_API Tensor add_relu(const Tensor & self, const Tensor & other, Scalar alpha=1);
TORCH_API Tensor & add_relu_(Tensor & self, const Tensor & other, Scalar alpha=1);
TORCH_API Tensor & add_relu_out(Tensor & out, const Tensor & self, const Tensor & other, Scalar alpha=1);
TORCH_API Tensor add(const Tensor & self, Scalar other, Scalar alpha=1);
TORCH_API Tensor & add_(Tensor & self, Scalar other, Scalar alpha=1);
TORCH_API Tensor addmv(const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & addmv_(Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & addmv_out(Tensor & out, const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & addmv_impl_cpu(Tensor & self, const Tensor & self2, const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & addmv_impl_cuda(Tensor & self, const Tensor & self2, const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor math_addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & addr_(Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & addr_out(Tensor & out, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & math_addr_out(Tensor & out, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor affine_grid_generator(const Tensor & theta, IntArrayRef size, bool align_corners);
TORCH_API Tensor affine_grid_generator_backward(const Tensor & grad, IntArrayRef size, bool align_corners);
TORCH_API Tensor all(const Tensor & self, int64_t dim, bool keepdim=false);
TORCH_API Tensor & all_out(Tensor & out, const Tensor & self, int64_t dim, bool keepdim=false);
TORCH_API Tensor all(const Tensor & self, Dimname dim, bool keepdim=false);
TORCH_API Tensor & all_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim=false);
TORCH_API bool allclose(const Tensor & self, const Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false);
TORCH_API Tensor any(const Tensor & self, int64_t dim, bool keepdim=false);
TORCH_API Tensor & any_out(Tensor & out, const Tensor & self, int64_t dim, bool keepdim=false);
TORCH_API Tensor any(const Tensor & self, Dimname dim, bool keepdim=false);
TORCH_API Tensor & any_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim=false);
TORCH_API Tensor arange(Scalar end, const TensorOptions & options={});
TORCH_API Tensor arange(Scalar start, Scalar end, const TensorOptions & options={});
TORCH_API Tensor arange(Scalar start, Scalar end, Scalar step, const TensorOptions & options={});
TORCH_API Tensor & arange_out(Tensor & out, Scalar end);
TORCH_API Tensor & arange_cpu_out(Tensor & out, Scalar start, Scalar end, Scalar step=1);
TORCH_API Tensor & arange_cuda_out(Tensor & out, Scalar start, Scalar end, Scalar step=1);
TORCH_API Tensor _dim_arange(const Tensor & like, int64_t dim);
TORCH_API Tensor argmax(const Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
TORCH_API Tensor & argmax_out(Tensor & out, const Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
TORCH_API Tensor argmin(const Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
TORCH_API Tensor & argmin_out(Tensor & out, const Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
TORCH_API Tensor acosh(const Tensor & self);
TORCH_API Tensor & acosh_(Tensor & self);
TORCH_API Tensor & acosh_out(Tensor & out, const Tensor & self);
TORCH_API Tensor arccosh(const Tensor & self);
TORCH_API Tensor & arccosh_(Tensor & self);
TORCH_API Tensor & arccosh_out(Tensor & out, const Tensor & self);
TORCH_API Tensor asinh(const Tensor & self);
TORCH_API Tensor & asinh_(Tensor & self);
TORCH_API Tensor & asinh_out(Tensor & out, const Tensor & self);
TORCH_API Tensor arcsinh(const Tensor & self);
TORCH_API Tensor & arcsinh_(Tensor & self);
TORCH_API Tensor & arcsinh_out(Tensor & out, const Tensor & self);
TORCH_API Tensor atanh(const Tensor & self);
TORCH_API Tensor & atanh_(Tensor & self);
TORCH_API Tensor & atanh_out(Tensor & out, const Tensor & self);
TORCH_API Tensor arctanh(const Tensor & self);
TORCH_API Tensor & arctanh_(Tensor & self);
TORCH_API Tensor & arctanh_out(Tensor & out, const Tensor & self);
TORCH_API Tensor as_strided_tensorimpl(const Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt);
TORCH_API Tensor as_strided_qtensorimpl(const Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt);
TORCH_API Tensor & as_strided_(Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt);
TORCH_API Tensor asin(const Tensor & self);
TORCH_API Tensor asin_sparse(const Tensor & self);
TORCH_API Tensor & asin_(Tensor & self);
TORCH_API Tensor & asin_sparse_(Tensor & self);
TORCH_API Tensor & asin_out(Tensor & out, const Tensor & self);
TORCH_API Tensor & asin_out_sparse(Tensor & out, const Tensor & self);
TORCH_API Tensor arcsin(const Tensor & self);
TORCH_API Tensor & arcsin_(Tensor & self);
TORCH_API Tensor & arcsin_out(Tensor & out, const Tensor & self);
TORCH_API Tensor atan(const Tensor & self);
TORCH_API Tensor & atan_(Tensor & self);
TORCH_API Tensor & atan_out(Tensor & out, const Tensor & self);
TORCH_API Tensor arctan(const Tensor & self);
TORCH_API Tensor & arctan_(Tensor & self);
TORCH_API Tensor & arctan_out(Tensor & out, const Tensor & self);
TORCH_API Tensor atleast_1d(const Tensor & self);
TORCH_API std::vector<Tensor> atleast_1d(TensorList tensors);
TORCH_API Tensor atleast_2d(const Tensor & self);
TORCH_API std::vector<Tensor> atleast_2d(TensorList tensors);
TORCH_API Tensor atleast_3d(const Tensor & self);
TORCH_API std::vector<Tensor> atleast_3d(TensorList tensors);
TORCH_API Tensor baddbmm_cpu(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor baddbmm_cuda(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & baddbmm__cpu(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & baddbmm__cuda(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & baddbmm_out_cpu(Tensor & out, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & baddbmm_out_cuda(Tensor & out, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor & _baddbmm_mkl_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1);
TORCH_API Tensor bartlett_window(int64_t window_length, const TensorOptions & options={});
TORCH_API Tensor bartlett_window(int64_t window_length, bool periodic, const TensorOptions & options={});
TORCH_API Tensor batch_norm(const Tensor & input, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & bias, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled);
TORCH_API Tensor quantized_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & mean, const Tensor & var, double eps, double output_scale, int64_t output_zero_point);
TORCH_API std::tuple<Tensor,Tensor,Tensor,Tensor,int64_t> _batch_norm_impl_index(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled);
TORCH_API std::tuple<Tensor,Tensor,Tensor> _batch_norm_impl_index_backward(int64_t impl_index, const Tensor & input, const Tensor & grad_output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_var_transform, bool train, double eps, std::array<bool,3> output_mask, const Tensor & reservedSpace);
TORCH_API Tensor bernoulli(const Tensor & self, c10::optional<Generator> generator=c10::nullopt);
TORCH_API Tensor & bernoulli_out(Tensor & out, const Tensor & self, c10::optional<Generator> generator=c10::nullopt);
TORCH_API Tensor & bernoulli_(Tensor & self, const Tensor & p, c10::optional<Generator> generator=c10::nullopt);
TORCH_API Tensor & bernoulli_(Tensor & self, double p=0.5, c10::optional<Generator> generator=c10::nullopt);
TORCH_API Tensor bernoulli(const Tensor & self, double p, c10::optional<Generator> generator=c10::nullopt);
TORCH_API Tensor bilinear(const Tensor & input1, const Tensor & input2, const Tensor & weight, const Tensor & bias);
TORCH_API Tensor binary_cross_entropy_cpu(const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor binary_cross_entropy_cuda(const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor & binary_cross_entropy_out_cpu(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor & binary_cross_entropy_out_cuda(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor binary_cross_entropy_backward_cpu(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor binary_cross_entropy_backward_cuda(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor & binary_cross_entropy_backward_out_cpu(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor & binary_cross_entropy_backward_out_cuda(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor binary_cross_entropy_with_logits(const Tensor & self, const Tensor & target, const Tensor & weight={}, const Tensor & pos_weight={}, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor binary_cross_entropy_with_logits_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight={}, const Tensor & pos_weight={}, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor _bincount_cpu(const Tensor & self, const Tensor & weights={}, int64_t minlength=0);
TORCH_API Tensor _bincount_cuda(const Tensor & self, const Tensor & weights={}, int64_t minlength=0);
TORCH_API Tensor bitwise_not(const Tensor & self);
TORCH_API Tensor & bitwise_not_(Tensor & self);
TORCH_API Tensor & bitwise_not_out(Tensor & out, const Tensor & self);
TORCH_API Tensor copysign(const Tensor & self, const Tensor & other);
TORCH_API Tensor & copysign_(Tensor & self, const Tensor & other);
TORCH_API Tensor & copysign_out(Tensor & out, const Tensor & self, const Tensor & other);
TORCH_API Tensor copysign(const Tensor & self, Scalar other);
TORCH_API Tensor & copysign_(Tensor & self, Scalar other);
TORCH_API Tensor logical_not(const Tensor & self);
TORCH_API Tensor & logical_not_(Tensor & self);
TORCH_API Tensor & logical_not_out(Tensor & out, const Tensor & self);
TORCH_API Tensor logical_xor(const Tensor & self, const Tensor & other);
TORCH_API Tensor & logical_xor_(Tensor & self, const Tensor & other);
TORCH_API Tensor & logical_xor_out(Tensor & out, const Tensor & self, const Tensor & other);
TORCH_API Tensor logical_and(const Tensor & self, const Tensor & other);
TORCH_API Tensor & logical_and_(Tensor & self, const Tensor & other);
TORCH_API Tensor & logical_and_out(Tensor & out, const Tensor & self, const Tensor & other);
TORCH_API Tensor logical_or(const Tensor & self, const Tensor & other);
TORCH_API Tensor & logical_or_(Tensor & self, const Tensor & other);
TORCH_API Tensor & logical_or_out(Tensor & out, const Tensor & self, const Tensor & other);
TORCH_API Tensor blackman_window(int64_t window_length, const TensorOptions & options={});
TORCH_API Tensor blackman_window(int64_t window_length, bool periodic, const TensorOptions & options={});
TORCH_API Tensor bmm_cpu(const Tensor & self, const Tensor & mat2);
TORCH_API Tensor bmm_cuda(const Tensor & self, const Tensor & mat2);
TORCH_API Tensor bmm_sparse_cpu(const Tensor & self, const Tensor & mat2);
TORCH_API Tensor bmm_sparse_cuda(const Tensor & self, const Tensor & mat2);
TORCH_API Tensor & bmm_out_cpu(Tensor & out, const Tensor & self, const Tensor & mat2);
TORCH_API Tensor & bmm_out_cuda(Tensor & out, const Tensor & self, const Tensor & mat2);
TORCH_API Tensor & bmm_out_sparse_cpu(Tensor & out, const Tensor & self, const Tensor & mat2);
TORCH_API Tensor & bmm_out_sparse_cuda(Tensor & out, const Tensor & self, const Tensor & mat2);
TORCH_API Tensor _bmm_sparse_cuda(const Tensor & self, const Tensor & mat2, bool deterministic=false);
TORCH_API Tensor & _bmm_out_sparse_cuda(Tensor & out, const Tensor & self, const Tensor & mat2, bool deterministic=false);
TORCH_API std::vector<Tensor> broadcast_tensors(TensorList tensors);
TORCH_API Tensor broadcast_to(const Tensor & self, IntArrayRef size);
TORCH_API Tensor cat(TensorList tensors, int64_t dim=0);
TORCH_API Tensor & cat_out(Tensor & out, TensorList tensors, int64_t dim=0);
TORCH_API Tensor cat(TensorList tensors, Dimname dim);
TORCH_API Tensor & cat_out(Tensor & out, TensorList tensors, Dimname dim);
TORCH_API Tensor block_diag(TensorList tensors);
TORCH_API Tensor ceil(const Tensor & self);
TORCH_API Tensor & ceil_(Tensor & self);
TORCH_API Tensor & ceil_out(Tensor & out, const Tensor & self);
TORCH_API Tensor chain_matmul(TensorList matrices);
TORCH_API std::vector<Tensor> unsafe_chunk(const Tensor & self, int64_t chunks, int64_t dim=0);
TORCH_API std::vector<Tensor> chunk(const Tensor & self, int64_t chunks, int64_t dim=0);
TORCH_API std::vector<Tensor> tensor_split(const Tensor & self, int64_t sections, int64_t dim=0);
TORCH_API std::vector<Tensor> tensor_split(const Tensor & self, IntArrayRef indices, int64_t dim=0);
TORCH_API std::vector<Tensor> tensor_split(const Tensor & self, const Tensor & tensor_indices_or_sections, int64_t dim=0);
TORCH_API Tensor clamp(const Tensor & self, c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt);
TORCH_API Tensor clamp_quantized_cpu(const Tensor & self, c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt);
TORCH_API Tensor & clamp_(Tensor & self, c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt);
TORCH_API Tensor & clamp_out(Tensor & out, const Tensor & self, c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt);
TORCH_API Tensor clamp_max(const Tensor & self, Scalar max);
TORCH_API Tensor & clamp_max_(Tensor & self, Scalar max);
TORCH_API Tensor & clamp_max_out(Tensor & out, const Tensor & self, Scalar max);
TORCH_API Tensor clamp_min(const Tensor & self, Scalar min);
TORCH_API Tensor & clamp_min_(Tensor & self, Scalar min);
TORCH_API Tensor & clamp_min_out(Tensor & out, const Tensor & self, Scalar min);
TORCH_API Tensor clip(const Tensor & self, c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt);
TORCH_API Tensor & clip_(Tensor & self, c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt);
TORCH_API Tensor & clip_out(Tensor & out, const Tensor & self, c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt);
TORCH_API bool cudnn_is_acceptable(const Tensor & self);
TORCH_API Tensor complex(const Tensor & real, const Tensor & imag);
TORCH_API Tensor & complex_out(Tensor & out, const Tensor & real, const Tensor & imag);
TORCH_API Tensor polar(const Tensor & abs, const Tensor & angle);
TORCH_API Tensor & polar_out(Tensor & out, const Tensor & abs, const Tensor & angle);
TORCH_API Tensor constant_pad_nd(const Tensor & self, IntArrayRef pad, Scalar value=0);
TORCH_API Tensor contiguous(const Tensor & self, MemoryFormat memory_format=MemoryFormat::Contiguous);
TORCH_API Tensor convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups);
TORCH_API Tensor convolution_overrideable(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups);
TORCH_API std::tuple<Tensor,Tensor,Tensor> convolution_backward_overrideable(const Tensor & grad_output, const Tensor & input, const Tensor & weight, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, std::array<bool,3> output_mask);
TORCH_API Tensor _convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32);
TORCH_API Tensor _convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled);
TORCH_API Tensor _convolution_nogroup(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding);
TORCH_API std::tuple<Tensor,Tensor,Tensor> _convolution_double_backward(const Tensor & ggI, const Tensor & ggW, const Tensor & ggb, const Tensor & gO, const Tensor & weight, const Tensor & self, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, std::array<bool,3> output_mask);
TORCH_API Tensor conv1d(const Tensor & input, const Tensor & weight, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1, int64_t groups=1);
TORCH_API Tensor conv2d(const Tensor & input, const Tensor & weight, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1, int64_t groups=1);
TORCH_API Tensor conv3d(const Tensor & input, const Tensor & weight, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1, int64_t groups=1);
TORCH_API Tensor conv_tbc(const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad=0);
TORCH_API std::tuple<Tensor,Tensor,Tensor> conv_tbc_backward(const Tensor & self, const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t pad);
TORCH_API Tensor conv_transpose1d(const Tensor & input, const Tensor & weight, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, int64_t groups=1, IntArrayRef dilation=1);
TORCH_API Tensor conv_transpose2d(const Tensor & input, const Tensor & weight, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, int64_t groups=1, IntArrayRef dilation=1);
TORCH_API Tensor conv_transpose3d(const Tensor & input, const Tensor & weight, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, int64_t groups=1, IntArrayRef dilation=1);
TORCH_API Tensor & copy_(Tensor & self, const Tensor & src, bool non_blocking=false);
TORCH_API Tensor cos(const Tensor & self);
TORCH_API Tensor & cos_(Tensor & self);
TORCH_API Tensor & cos_out(Tensor & out, const Tensor & self);
TORCH_API Tensor cosh(const Tensor & self);
TORCH_API Tensor & cosh_(Tensor & self);
TORCH_API Tensor & cosh_out(Tensor & out, const Tensor & self);
TORCH_API Tensor cosine_embedding_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin=0.0, int64_t reduction=at::Reduction::Mean);
TORCH_API Tensor count_nonzero(const Tensor & self, IntArrayRef dim);
TORCH_API Tensor count_nonzero(const Tensor & self, c10::optional<int64_t> dim=c10::nullopt);
TORCH_API Tensor cudnn_affine_grid_generator_forward(const Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W);
TORCH_API Tensor cudnn_affine_grid_generator_backward(const Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W);
TORCH_API std::tuple<Tensor,Tensor,Tensor,Tensor> cudnn_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double exponential_average_factor, double epsilon);
TORCH_API std::tuple<Tensor,Tensor,Tensor> cudnn_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_var, double epsilon, const Tensor & reserveSpace);
TORCH_API Tensor cudnn_convolution_deprecated(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic);
TORCH_API Tensor cudnn_convolution_deprecated2(const Tensor & self, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic);
TORCH_API Tensor cudnn_convolution(const Tensor & self, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32);
TORCH_API Tensor cudnn_convolution_backward_input(IntArrayRef self_size, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32);
TORCH_API std::tuple<Tensor,Tensor> cudnn_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, std::array<bool,2> output_mask);
TORCH_API Tensor cudnn_convolution_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32);
TORCH_API Tensor cudnn_convolution_transpose_deprecated(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic);
TORCH_API Tensor cudnn_convolution_transpose_deprecated2(const Tensor & self, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic);
TORCH_API Tensor cudnn_convolution_transpose(const Tensor & self, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32);
TORCH_API std::tuple<Tensor,Tensor> cudnn_convolution_transpose_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, std::array<bool,2> output_mask);
TORCH_API Tensor cudnn_convolution_transpose_backward_input(const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32);
TORCH_API Tensor cudnn_convolution_transpose_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32);
TORCH_API Tensor cudnn_grid_sampler_forward(const Tensor & self, const Tensor & grid);
TORCH_API std::tuple<Tensor,Tensor> cudnn_grid_sampler_backward(const Tensor & self, const Tensor & grid, const Tensor & grad_output);
TORCH_API std::tuple<Tensor,Tensor> cummax(const Tensor & self, int64_t dim);
TORCH_API std::tuple<Tensor &,Tensor &> cummax_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim);
TORCH_API std::tuple<Tensor,Tensor> cummax(const Tensor & self, Dimname dim);
TORCH_API std::tuple<Tensor &,Tensor &> cummax_out(Tensor & values, Tensor & indices, const Tensor & self, Dimname dim);
TORCH_API void cummax_helper_cpu(const Tensor & self, Tensor & values, Tensor & indices, int64_t dim);
TORCH_API void cummax_helper_cuda(const Tensor & self, Tensor & values, Tensor & indices, int64_t dim);
TORCH_API std::tuple<Tensor,Tensor> cummin(const Tensor & self, int64_t dim);
TORCH_API std::tuple<Tensor &,Tensor &> cummin_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim);
TORCH_API std::tuple<Tensor,Tensor> cummin(const Tensor & self, Dimname dim);