from sys import maxsize
import torch
import torch.onnx.symbolic_helper as sym_help
import warnings
import numpy
from torch.onnx.symbolic_helper import parse_args, _unimplemented, _is_tensor_list
from torch.onnx.symbolic_opset9 import expand, unused
from torch.nn.modules.utils import _single, _pair, _triple
from torch.onnx.utils import _add_block, _add_input_to_block, _add_output_to_block
# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in symbolic_helper.py
# This file exports ONNX ops for opset 11
@parse_args('v', 'f', 'f')
def hardtanh(g, self, min_val, max_val):
    """Export aten::hardtanh as an opset-11 Clip whose min/max are tensor inputs."""
    scalar_name = self.type().scalarType()
    # When the input dtype is unknown, fall back to float (ONNX type index 6).
    if scalar_name is None:
        onnx_idx = 6
    else:
        onnx_idx = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[scalar_name])
    torch_dtype = sym_help.scalar_type_to_pytorch_type[onnx_idx]
    lo = g.op("Constant", value_t=torch.tensor(min_val, dtype=torch_dtype))
    hi = g.op("Constant", value_t=torch.tensor(max_val, dtype=torch_dtype))
    return g.op("Clip", self, lo, hi)
def clamp(g, self, min, max):
    """Export aten::clamp; opset-11 Clip takes the optional min/max as inputs."""
    scalar_name = self.type().scalarType()

    def _coerce(bound):
        # Pass absent bounds (None or a prim::Constant None value) through
        # untouched; cast real bounds to self's dtype.
        if bound is None or sym_help._is_none(bound):
            return bound
        return g.op("Cast", bound, to_i=sym_help.cast_pytorch_to_onnx[scalar_name])

    if scalar_name is not None:
        min = _coerce(min)
        max = _coerce(max)
    return g.op("Clip", self, min, max)
def clamp_min(g, self, min):
    """One-sided clamp: reuse clamp with an absent (unused) upper bound."""
    return clamp(g, self, min, unused(g))
def clamp_max(g, self, max):
    """One-sided clamp: reuse clamp with an absent (unused) lower bound."""
    return clamp(g, self, unused(g), max)
# Opset 11 gather accepts negative indices
@parse_args('v', 'i', 'v')
def select(g, self, dim, index):
    """aten::select -> Gather along `dim` (negative indices are legal in opset 11)."""
    return g.op("Gather", self, index, axis_i=dim)
def index_put(g, self, indices_list_value, values, accumulate=False):
    """Symbolic for aten::index_put(_): write `values` into `self` at the given indices.

    Lowers to ONNX ScatterND (plus an Add when accumulate is true).  Two
    special cases short-circuit below: the ATen-fallback export mode, and a
    boolean mask index, which becomes masked_fill / masked_scatter.
    """
    indices_list = sym_help._unpack_list(indices_list_value)
    # ATen-fallback export: emit the op unchanged for the fallback runtime.
    if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
        args = [self] + indices_list + [values, accumulate]
        return g.op("ATen", *args, operator_s='index_put')
    from torch.onnx.symbolic_opset9 import add, expand
    accumulate = sym_help._parse_arg(accumulate, 'b')
    index = indices_list[0]
    if len(indices_list) > 1:
        # Multiple index tensors: add them together to obtain their broadcast
        # shape, expand every index tensor to that shape, then stack them on a
        # trailing axis to form ScatterND's [..., rank] coordinate layout.
        for ind in indices_list[1:]:
            index = add(g, index, ind)
        broadcast_index_shape = g.op("Shape", index)
        indices_list = [
            sym_help._unsqueeze_helper(g, expand(g, ind, broadcast_index_shape, None), [-1]) for ind in indices_list
        ]
        index = g.op("Concat", *indices_list, axis_i=-1)
    else:
        # Replace index_put node with masked_scatter or masked_fill
        # when inputs to the index_put node contains boolean inputs
        #
        # index_put -> masked_fill
        #
        # before graph(%0 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=1, device=cpu),
        #       %some_const : Float(requires_grad=0, device=cpu)):
        #   %6 : None = prim::Constant()
        #   %mask : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = aten::clone(%0, %6)
        #   %8 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = aten::ne(%mask, %some_const)
        #   %26 : Long(requires_grad=0, device=cpu) = prim::Constant[value={11}]()
        #   %27 : Long(requires_grad=0, device=cpu) = prim::Constant[value={0}]()
        #   %11 : Device = prim::Constant[value="cpu"]()
        #   %12 : None = prim::Constant()
        #   %28 : Long(requires_grad=0, device=cpu) = prim::Constant[value={0}]()
        #   %29 : Long(requires_grad=0, device=cpu) = prim::Constant[value={0}]()
        #   %15 : None = prim::Constant()
        #   %16 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) =
        #               aten::to(%8, %26, %27, %11, %12, %28, %29, %15)
        #   %18 : Float(requires_grad=0, device=cpu) = prim::Constant[value={1}]()
        #   %30 : Long(requires_grad=0, device=cpu) = prim::Constant[value={0}]()
        #   %22 : int[] = prim::Constant[value=[-1]]()
        #   %23 : Tensor = aten::view(%16, %22)
        #   %24 : Tensor?[] = prim::ListConstruct(%23)
        #   %25 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) =
        #                aten::index_put(%mask, %24, %18, %30)
        #   return (%25)
        #
        # after graph(%0 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu),
        #       %some_const : Float(requires_grad=0, device=cpu)):
        #   %3 : Tensor = onnx::Equal(%0, %some_const)
        #   %4 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = onnx::Not(%3)
        #   %12 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = onnx::Cast[to=9](%4)
        #   %19 : Tensor = onnx::Cast[to=9](%12)
        #   %20 : Tensor = onnx::Constant[value={1}]()
        #   %21 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)
        #                = onnx::Where(%19, %20, %0)
        #   return (%21)
        #
        # index_put -> masked_scatter
        #
        # before graph(%0 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=1, device=cpu),
        #       %some_const : Float(requires_grad=0, device=cpu)):
        #   %6 : None = prim::Constant()
        #   %mask : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = aten::clone(%0, %6)
        #   %28 : Float(8, strides=[1], requires_grad=0, device=cpu)
        #                = prim::Constant[value= 1  1  1  1  1  1  1  1 [ CPUFloatType{8} ]]()
        #   %15 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)
        #                = aten::ne(%mask, %some_const)
        #   %34 : Long(requires_grad=0, device=cpu) = prim::Constant[value={11}]()
        #   %35 : Long(requires_grad=0, device=cpu) = prim::Constant[value={0}]()
        #   %18 : Device = prim::Constant[value="cpu"]()
        #   %19 : None = prim::Constant()
        #   %36 : Long(requires_grad=0, device=cpu) = prim::Constant[value={0}]()
        #   %37 : Long(requires_grad=0, device=cpu) = prim::Constant[value={0}]()
        #   %22 : None = prim::Constant()
        #   %23 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)
        #                = aten::to(%15, %34, %35, %18, %19, %36, %37, %22)
        #   %38 : Long(requires_grad=0, device=cpu) = prim::Constant[value={0}]()
        #   %30 : int[] = prim::Constant[value=[-1]]()
        #   %31 : Tensor = aten::view(%23, %30)
        #   %32 : Tensor?[] = prim::ListConstruct(%31)
        #   %33 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)
        #                = aten::index_put(%mask, %32, %28, %38)
        #   return (%33)
        #
        # after graph(%0 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu),
        #       %some_const : Float(requires_grad=0, device=cpu)):
        #   %3 : Float(8, strides=[1], requires_grad=0, device=cpu)
        #                = onnx::Constant[value= 1  1  1  1  1  1  1  1 [ CPUFloatType{8} ]]()
        #   %4 : Tensor = onnx::Equal(%0, %some_const)
        #   %5 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = onnx::Not(%4)
        #   %13 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = onnx::Cast[to=9](%5)
        #   %19 : Tensor = onnx::Shape(%0)
        #   %20 : Tensor = onnx::Expand(%13, %19)
        #   %21 : Tensor = onnx::NonZero(%20)
        #   %22 : Tensor = onnx::Transpose[perm=[1, 0]](%21)
        #   %23 : Tensor = onnx::Constant[value={-1}]()
        #   %24 : Tensor = onnx::Reshape(%3, %23)
        #   %25 : Tensor = onnx::Shape(%22)
        #   %27 : Tensor = onnx::Constant[value={0}]()
        #   %28 : Tensor = onnx::Gather[axis=0](%25, %27)
        #   %29 : Tensor = onnx::Constant[value={0}]()
        #   %30 : Tensor = onnx::Unsqueeze[axes=[0]](%29)
        #   %31 : Tensor = onnx::Unsqueeze[axes=[0]](%28)
        #   %32 : Tensor = onnx::Constant[value={0}]()
        #   %33 : Tensor = onnx::Unsqueeze[axes=[0]](%32)
        #   %34 : Tensor = onnx::Slice(%24, %30, %31, %33)
        #   %35 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)
        #                = onnx::ScatterND(%0, %22, %34)
        #   return (%35)
        # NOTE(review): this assumes the single index value is produced by a
        # node whose first input is the (possibly boolean) mask tensor —
        # e.g. the aten::view in the graphs above.  Verify against callers.
        bool_inp = list(index.node().inputs())[0]
        if bool_inp.type() is not None and bool_inp.type().scalarType() == 'Bool':
            rank = sym_help._get_tensor_rank(values)
            if rank is not None and rank == 0:
                # A 0-d fill value with a boolean mask is exactly masked_fill.
                from torch.onnx.symbolic_opset9 import masked_fill
                return masked_fill(g, self, bool_inp, values)
            return masked_scatter(g, self, bool_inp, values)
        broadcast_index_shape = g.op("Shape", index)
        index = sym_help._unsqueeze_helper(g, index, [-1])
    # Reshape `values` to (broadcast index shape) + (shape of the trailing,
    # un-indexed dims of self) so ScatterND sees one update row per index row.
    sub_data_shape = sym_help._slice_helper(
        g, g.op("Shape", self), axes=[0], starts=[len(indices_list)], ends=[maxsize])
    values_shape = g.op("Concat", broadcast_index_shape, sub_data_shape, axis_i=0)
    values = g.op("Reshape", values, values_shape)
    if accumulate:
        # accumulate=True: scatter the updates into a zero tensor of self's
        # shape and dtype, then add that onto self.
        dtype = self.type().scalarType()
        dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
        dtype = sym_help.scalar_type_to_pytorch_type[dtype]
        zeros = g.op("ConstantOfShape", g.op("Shape", self), value_t=torch.tensor([0], dtype=dtype))
        result = g.op("ScatterND", zeros, index, values)
        result = add(g, self, result)
    else:
        result = g.op("ScatterND", self, index, values)
    return result
@parse_args('v', 'i')
def pixel_shuffle(g, self, upscale_factor):
    """aten::pixel_shuffle -> DepthToSpace in CRD mode."""
    input_rank = sym_help._get_tensor_rank(self)
    # DepthToSpace is only defined for 4-d (NCHW) inputs; reject other ranks.
    if input_rank is not None and input_rank != 4:
        return _unimplemented("pixel_shuffle", "only support 4d input")
    return g.op("DepthToSpace", self, blocksize_i=upscale_factor, mode_s="CRD")
def _interpolate(name, dim, interpolate_mode):
    """Factory: build the opset-11 symbolic for one upsample variant via the shared helper."""
    symbolic_fn = sym_help._interpolate_helper(name, dim, interpolate_mode)
    return symbolic_fn
upsample_nearest1d = _interpolate('upsample_nearest1d', 3, "nearest")
upsample_nearest2d = _interpolate('upsample_nearest2d', 4, "nearest")
upsample_nearest3d = _interpolate('upsample_nearest3d', 5, "nearest")
upsample_linear1d = _interpolate('upsample_linear1d', 3, "linear")
upsample_bilinear2d = _interpolate('upsample_bilinear2d', 4, "linear")
upsample_trilinear3d = _interpolate('upsample_trilinear3d', 5, "linear")
upsample_bicubic2d = _interpolate('upsample_bicubic2d', 4, "cubic")
def __interpolate(g, input, size, scale_factor, mode, align_corners, recompute_scale_factor):
    """aten::__interpolate (size/mode only known as graph values): delegate to the shared helper."""
    return sym_help.__interpolate_helper(
        g, input, size, scale_factor, mode, align_corners, recompute_scale_factor)
@parse_args('v', 'i', 'v', 'v')
def gather(g, self, dim, index, sparse_grad=False):
    """aten::gather -> GatherElements (element-wise gather, available from opset 11)."""
    # ONNX has no notion of sparse gradients, so a constant-True sparse_grad
    # cannot be exported.
    if sym_help._maybe_get_const(sparse_grad, 'i'):
        return _unimplemented("gather", "sparse_grad == True")
    # In ATen-fallback mode, keep the original op for the fallback runtime.
    if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
        return g.op("ATen", self, dim, index, sparse_grad, operator_s="gather")
    return g.op("GatherElements", self, index, axis_i=dim)
@parse_args('v', 'i', 'v', 'v')
def scatter(g, self, dim, index, src):
    """aten::scatter -> ScatterElements; a scalar src is cast and expanded to match."""
    from torch.onnx.symbolic_opset9 import expand_as
    # In ATen-fallback mode, keep the original op for the fallback runtime.
    if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
        return g.op("ATen", self, dim, index, src, operator_s="scatter")
    src_scalar_type = src.type().scalarType()
    src = sym_help._maybe_get_scalar(src)
    if not sym_help._is_value(src):
        # Check if scalar 'src' has same type as self (PyTorch allows different
        # type for scalar src (but not when src is tensor)). If not, insert Cast node.
        if self.type().scalarType() != src_scalar_type:
            src = g.op("Cast", src, to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])
        return g.op("ScatterElements", self, index, expand_as(g, src, index), axis_i=dim)
    return g.op("ScatterElements", self, index, src, axis_i=dim)
@parse_args('v', 'i', 'none')
def cumsum(g, self, dim, dtype=None):
    """aten::cumsum -> CumSum; an explicit dtype argument becomes a leading Cast."""
    axis = g.op("Constant", value_t=torch.tensor(dim, dtype=torch.int))
    operand = self
    # A dtype value that is not a plain prim::Constant carries a requested
    # output dtype (presumably prim::Constant here encodes None — verify).
    if dtype and dtype.node().kind() != 'prim::Constant':
        requested = sym_help._get_const(dtype, 'i', 'dtype')
        operand = g.op("Cast", self, to_i=sym_help.scalar_type_to_onnx[requested])
    return g.op("CumSum", operand, axis)
def masked_select(g, self, mask):
    """aten::masked_select -> GatherND at the nonzero coordinates of the broadcast mask."""
    from torch.onnx.symbolic_opset9 import expand_as, nonzero
    coords = nonzero(g, expand_as(g, mask, self))
    return g.op('GatherND', self, coords)
def masked_scatter(g, self, mask, source):
    """aten::masked_scatter -> ScatterND at the mask's nonzero coordinates."""
    from torch.onnx.symbolic_opset9 import expand_as, nonzero, size, view
    coords = nonzero(g, expand_as(g, mask, self))
    # NOTE: source can have more elements than needed.
    # It could also have arbitrary shape.
    # This is not supported by ONNX::ScatterND, so we need to flatten and
    # slice source tensor: keep only the first len(coords) elements.
    flat_source = view(g, source, torch.LongTensor([-1]))
    flat_source = sym_help._slice_helper(g, flat_source,
                                         axes=torch.LongTensor([0]),
                                         starts=torch.LongTensor([0]),
                                         ends=size(g, coords, torch.LongTensor([0])),
                                         dynamic_slice=True)
    return g.op('ScatterND', self, coords, flat_source)
def _len(g, self):
    """aten::len: SequenceLength for tensor lists / SplitToSequence outputs,
    otherwise the size of dimension 0 squeezed back to a scalar.
    """
    if _is_tensor_list(self) or self.node().kind() == "onnx::SplitToSequence":
        return g.op("SequenceLength", self)
    # BUG FIX: `size` was used without being in scope — the top-of-file
    # opset9 import only brings in `expand` and `unused`, so this path
    # raised NameError.  Import it locally, matching the file's style of
    # function-scope opset9 imports.
    from torch.onnx.symbolic_opset9 import size
    sz_0 = size(g, self, g.op("Constant", value_t=torch.LongTensor([0])))
    return sym_help._squeeze_helper(g, sz_0, [0])
def __getitem_(g, self, i):
    """aten::__getitem__: SequenceAt for tensor lists, plain opset-9 indexing otherwise."""
    if not sym_help._is_tensor_list(self):
        from torch.onnx.symbolic_opset9 import __getitem_ as getitem
        return getitem(g, self, i)
    # SequenceAt requires that the input be a List of Tensors
    return g.op("SequenceAt", self, i)
def append(g, self, tensor):
    """aten::append on a tensor list: SequenceInsert at the end."""
    appended = g.op("SequenceInsert", self, tensor)
    return appended
def add(g, self, other, alpha=None):
    """aten::add; tensor-list + tensor-list becomes repeated SequenceInsert,
    everything else falls through to the opset-9 Add symbolic."""
    if not (sym_help._is_value(self) and sym_help._is_tensor_list(self)):
        return torch.onnx.symbolic_opset9.add(g, self, other, alpha)
    # Only a statically constructed list can be appended element by element.
    if other.node().kind() != "prim::ListConstruct":
        return _unimplemented("add", "does not support adding dynamic tensor list to another")
    result = self
    for elem in sym_help._unpack_list(other):
        result = g.op("SequenceInsert", result, elem)
    return result
def insert(g, self, pos, tensor):
    """aten::insert on a tensor list: SequenceInsert at position `pos`."""
    inserted = g.op("SequenceInsert", self, tensor, pos)
    return inserted
def pop(g, tensor_list, dim):
    """aten::pop on a tensor list: SequenceErase removes the element at `dim`."""
    erased = g.op("SequenceErase", tensor_list, dim)
    return erased
def Delete(g, tensor_list, dim):
    """aten::Delete: same lowering as pop — SequenceErase at `dim`."""
    removed = g.op("SequenceErase", tensor_list, dim)
    return removed
def cat(g, tensor_list, dim):
    """aten::cat: statically packed lists reuse opset 9; dynamic sequences
    use ConcatFromSequence."""
    if not sym_help._is_packed_list(tensor_list):
        axis = sym_help._get_const(dim, 'i', 'dim')
        return g.op("ConcatFromSequence", tensor_list, axis_i=axis)
    from torch.onnx.symbolic_opset9 import cat as cat_opset9
    return cat_opset9(g, tensor_list, dim)
def stack(g, tensor_list, dim):
    """aten::stack: statically packed lists reuse opset 9; dynamic sequences
    concatenate along a freshly inserted axis (new_axis_i=1)."""
    if not sym_help._is_packed_list(tensor_list):
        axis = sym_help._get_const(dim, 'i', 'dim')
        return g.op("ConcatFromSequence", tensor_list, axis_i=axis, new_axis_i=1)
    from torch.onnx.symbolic_opset9 import stack as stack_opset9
    return stack_opset9(g, tensor_list, dim)
# NOTE: remainder of file not shown in this view ("Loading ..." page artifact removed).