#pragma once
#include <c10/core/Device.h>
#include <c10/core/Layout.h>
#include <c10/core/MemoryFormat.h>
#include <c10/core/QScheme.h>
#include <c10/core/Stream.h>
#include <c10/core/Scalar.h>
#include <c10/core/ScalarType.h>
#include <c10/core/ScalarTypeToTypeMeta.h>
#include <c10/core/Storage.h>
#include <ATen/core/TensorAccessor.h>
#include <c10/core/TensorImpl.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <c10/core/WrapDimMinimal.h>
#include <c10/util/Exception.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/util/intrusive_ptr.h>
#include <ATen/core/DeprecatedTypePropertiesRegistry.h>
#include <ATen/core/DeprecatedTypeProperties.h>
#include <ATen/core/NamedTensor.h>
#include <ATen/core/QuantizerBase.h>
#include <torch/csrc/WindowsTorchApiMacro.h>
namespace caffe2 {
class Tensor;
}
namespace c10 {
struct TensorOptions;
template<class T> class List;
}
namespace at {
struct Generator;
struct Type;
class DeprecatedTypeProperties;
class Tensor;
} // namespace at
namespace at {
namespace indexing {
struct TensorIndex;
} // namespace indexing
} // namespace at
namespace torch { namespace autograd {
struct Node;
}} // namespace torch::autograd
namespace at {
class Tensor;
using TensorList = ArrayRef<Tensor>;
using Stream = c10::Stream;
namespace impl {
inline bool variable_excluded_from_dispatch() {
#ifdef C10_MOBILE
// Please read the comment in `VariableFallbackKernel.cpp` about the background of this change.
return true;
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!c10::impl::tls_local_dispatch_key_set().excluded_.has(DispatchKey::Autograd));
return c10::impl::tls_local_dispatch_key_set().excluded_.isSupersetOf(c10::autograd_dispatch_keyset);
#endif
}
}
// Tensor is a "generic" object holding a pointer to the underlying TensorImpl object, which
// has an embedded reference count. In this way, Tensor is similar to boost::intrusive_ptr.
//
// For example:
//
// void func(Tensor a) {
// Tensor b = a;
// ...
// }
//
// In this example, when we say Tensor b = a, we are creating a new object that points to the
// same underlying TensorImpl and bumps its reference count. When b goes out of scope, the
// destructor decrements the reference count of the TensorImpl it points to, freeing it once
// the count reaches zero.
// The existing constructors, operator overloads, etc. take care to implement the correct semantics.
//
// Note that a Tensor can also be undefined, i.e. it is not associated with any underlying
// TensorImpl, and special care must be taken to handle this.
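//
// A minimal illustrative sketch (not part of this header) of handling an undefined
// Tensor; `make_tensor_somehow()` is a hypothetical helper:
//
//   Tensor t;                        // default-constructed: undefined
//   TORCH_INTERNAL_ASSERT(!t.defined());
//   t = make_tensor_somehow();       // now wraps a real TensorImpl
//   if (t.defined()) {
//     // safe to query sizes(), dtype(), etc.
//   }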
class TORCH_API Tensor {
public:
Tensor() = default;
// This constructor should not be used by end users and is an implementation
// detail invoked by autogenerated code.
explicit Tensor(
c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl)
: impl_(std::move(tensor_impl)) {
if (impl_.get() == nullptr) {
throw std::runtime_error("TensorImpl with nullptr is not supported");
}
}
Tensor(const Tensor&) = default;
Tensor(Tensor&&) = default;
public:
// Creates a new wrapper from a TensorImpl. Intentionally a static factory method rather
// than a constructor, because it should be used with care: it checks the necessary invariants.
static Tensor wrap_tensor_impl(
c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl) {
Tensor r(std::move(tensor_impl));
r.enforce_invariants();
return r;
}
int64_t dim() const {
return impl_->dim();
}
int64_t storage_offset() const {
return impl_->storage_offset();
}
Tensor contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const {
if (is_contiguous(memory_format)) {
return *this;
} else {
return __dispatch_contiguous(memory_format);
}
}
int64_t size(int64_t dim) const {
// false is passed to maybe_wrap_dim so behavior is identical to array access (but with wrapping)
dim = c10::maybe_wrap_dim(dim, this->dim(), false);
return sizes()[dim];
}
int64_t stride(int64_t dim) const {
// false is passed to maybe_wrap_dim so behavior is identical to array access (but with wrapping)
dim = c10::maybe_wrap_dim(dim, this->dim(), false);
return strides()[dim];
}
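// Illustrative sketch (assumes a defined 3-d tensor `t` with sizes {2, 3, 4});
// negative dims wrap around, matching Python-style indexing:
//
//   t.size(0);    // 2
//   t.size(-1);   // 4, same as t.size(2)
//   t.stride(-1) == t.strides()[t.dim() - 1];  // true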
TensorImpl * unsafeGetTensorImpl() const {
return impl_.get();
}
TensorImpl * unsafeReleaseTensorImpl() {
return impl_.release();
}
const c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>& getIntrusivePtr() const {
return impl_;
}
c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> unsafeReleaseIntrusivePtr() {
return std::move(impl_);
}
bool defined() const {
return impl_;
}
void reset() {
impl_.reset();
}
// The following overloads are very intriguing. Consider the following
// program:
//
// x[1] = 3;
//
// We would expect the entry of x at index 1 to be set to 3. But how can we
// actually achieve this? x[1] evaluates to a tensor...
//
// The answer is, using a ref-qualifier. x[1] is an rvalue, which cannot be
// (profitably) assigned to in the traditional sense, so we overload
// assignment to mean, "Actually, copy 3 into the tensor data." This is done
// with an rvalue-reference ref-qualified overload (the methods with && at the
// end of their type.)
//
// There's one more fly in the ointment: We also want
//
// Tensor x = y;
//
// to work, and we want it NOT to copy. So we need a traditional operator=
// overload. But we MUST specify a mutable lvalue ref-qualifier, to
// disambiguate the traditional overload from the rvalue-reference
// ref-qualified overload. Otherwise, it will be ambiguous, because
// a non ref-qualified method is eligible for all situations.
// Unfortunately, we have to write these constructors out manually
// to work around an MSVC bug:
// error C2580: 'at::Tensor &at::Tensor::operator =(const at::Tensor &) &':
// multiple versions of a defaulted special member functions are not allowed
// Tensor& operator=(const Tensor&) & = default;
// Tensor& operator=(Tensor&&) & = default;
// Also MSVC will wrongly issue the following warning with the aforementioned fix
// warning C4522: 'at::Tensor': multiple assignment operators specified
// Let's just skip the warning.
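//
// Illustrative sketch of the two behaviors (assumes defined tensors `x` and `y`):
//
//   Tensor z;
//   z = y;      // lvalue operator=: z now points at y's TensorImpl, no data copy
//   x[1] = 3;   // rvalue operator=(Scalar) &&: writes 3 into x's storage at index 1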
#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable : 4522 )
#endif
Tensor& operator=(const Tensor& x) & {
impl_ = x.impl_;
return *this;
}
Tensor& operator=(Tensor&& x) & {
impl_ = std::move(x.impl_);
return *this;
}
Tensor& operator=(Scalar v) &&;
Tensor& operator=(const Tensor&) &&;
Tensor& operator=(Tensor&&) &&;
bool is_same(const Tensor& other) const noexcept {
return impl_ == other.impl_;
}
size_t use_count() const noexcept {
return impl_.use_count();
}
size_t weak_use_count() const noexcept {
return impl_.weak_use_count();
}
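// Illustrative sketch (assumes a defined tensor `a`): copies share the same
// TensorImpl, which is what is_same() and use_count() observe:
//
//   Tensor b = a;
//   a.is_same(b);   // true: same underlying TensorImpl
//   a.use_count();  // at least 2 while both a and b are alive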
std::string toString() const;
IntArrayRef sizes() const {
return impl_->sizes();
}
IntArrayRef strides() const {
return impl_->strides();
}
// See impl::get_opt_names in ATen/NamedTensor.h for docs.
c10::optional<DimnameList> opt_names() const {
return impl::get_opt_names(unsafeGetTensorImpl());
}
// See impl::get_names in ATen/NamedTensor.h for docs.
DimnameList names() const {
return impl::get_names(unsafeGetTensorImpl());
}
int64_t ndimension() const {
return dim();
}
bool is_contiguous(at::MemoryFormat memory_format=at::MemoryFormat::Contiguous) const {
return impl_->is_contiguous(memory_format);
}
bool is_non_overlapping_and_dense() const {
return impl_->is_non_overlapping_and_dense();
}
at::MemoryFormat suggest_memory_format(
bool channels_last_strides_exact_match = false) const {
// Setting channels_last_strides_exact_match to true forces the function to
// also check the strides of dimensions of size 0 or 1.
if (!is_mkldnn() && !is_sparse()) {
if (impl_->is_strides_like_channels_last()) {
if (!channels_last_strides_exact_match ||
get_channels_last_strides_2d(sizes()) == strides()) {
return at::MemoryFormat::ChannelsLast;
}
}
else if (impl_->is_strides_like_channels_last_3d()) {
if (!channels_last_strides_exact_match ||
get_channels_last_strides_3d(sizes()) == strides()) {
return at::MemoryFormat::ChannelsLast3d;
}
}
}
return at::MemoryFormat::Contiguous;
}
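// Illustrative sketch (assumes a defined 4-d tensor `t` whose strides follow the
// channels-last / NHWC pattern):
//
//   if (t.suggest_memory_format() == at::MemoryFormat::ChannelsLast) {
//     t = t.contiguous(at::MemoryFormat::ChannelsLast);  // no-op if already contiguous in that format
//   }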
// Total bytes consumed by the "view" of elements of the array. Does not
// include size of metadata. The number reported here does not necessarily
// correspond to the true physical memory consumed by a tensor; instead,
// it reports the memory the tensor would take *if* it were contiguous.
// Defined to be numel() * itemsize()
size_t nbytes() const {
TORCH_CHECK(layout() != at::kSparse,
"nbytes is not defined for sparse tensors. If you want the size of the constituent " \
"tensors, add the nbytes of the indices and values. If you want the size of the " \
"equivalent dense tensor, multiply numel() by element_size()");
return impl_->numel() * impl_->itemsize();
}
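// Worked example (assumes a defined, dense float tensor `t` with sizes {2, 3}):
//
//   t.numel();     // 6
//   t.itemsize();  // 4 bytes for kFloat
//   t.nbytes();    // 6 * 4 == 24, even if t is a non-contiguous view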
int64_t numel() const {
return impl_->numel();
}
// Length of one array element in bytes. This is the traditional
// NumPy naming.
size_t itemsize() const {
return impl_->itemsize();
}
// Same as itemsize(). This is the PyTorch naming.
int64_t element_size() const {
return static_cast<int64_t>(impl_->itemsize());
}
C10_DEPRECATED_MESSAGE("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().")
DeprecatedTypeProperties & type() const {
return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
dispatchKeyToBackend(legacyExtractDispatchKey(key_set())),
scalar_type());
}
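// Migration sketch for the deprecation above (assumes a defined tensor `t`):
//
//   auto st   = t.scalar_type();  // instead of t.type().scalar_type()
//   auto dev  = t.device();       // instead of t.type().backend()
//   auto opts = t.options();      // instead of t.type() when constructing new tensors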
DispatchKeySet key_set() const {
return impl_->key_set();
}
ScalarType scalar_type() const {
return typeMetaToScalarType(impl_->dtype());
}
bool has_storage() const {
return defined() && impl_->has_storage();
}
const Storage& storage() const {
return impl_->storage();
}
bool is_alias_of(const at::Tensor& other) const{
return impl_->storage().is_alias_of(other.storage());
}
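// Illustrative sketch (assumes a defined tensor `t`): views created from `t`
// share its storage, so is_alias_of() reports true:
//
//   Tensor v = t.slice(/*dim=*/0, /*start=*/0, /*end=*/1);
//   v.is_alias_of(t);  // true: v and t share the same Storage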
Tensor toType(ScalarType t) const;
Tensor toBackend(Backend b) const;
C10_DEPRECATED_MESSAGE("Tensor.is_variable() is deprecated; everything is a variable now. (If you want to assert that variable has been appropriately handled already, use at::impl::variable_excluded_from_dispatch())")
bool is_variable() const noexcept {
return !at::impl::variable_excluded_from_dispatch();
}
/// Returns a `Tensor`'s layout. Defined in Type.h
Layout layout() const noexcept;
/// Returns a `Tensor`'s dtype (`TypeMeta`). Defined in TensorMethods.cpp
caffe2::TypeMeta dtype() const noexcept;
/// Returns a `Tensor`'s device.
Device device() const;
/// Returns a `Tensor`'s device index.
int64_t get_device() const;
/// Returns if a `Tensor` has CUDA backend.
bool is_cuda() const;