
## @package onnx
# Module caffe2.python.onnx.backend

"""Backend for running ONNX on Caffe2

To run this, you will need to have Caffe2 installed as well.
"""
import collections
import sys
import zipfile
import itertools

# When onnx is built against a version of protobuf that is older than
# that which is vendored with caffe2, onnx will crash if caffe2's
# vendored protobuf is loaded first. We can work around this by
# importing onnx first, which will cause it to go out and pick up the
# system protobuf.
import onnx.backend
from caffe2.python import core, workspace, rnn_cell, gru_cell
from caffe2.python.model_helper import ModelHelper
from caffe2.proto import caffe2_pb2
import caffe2.python.utils
import numpy as np
import onnx
from onnx import TensorProto
import onnx.numpy_helper
import onnx.defs
import onnx.optimizer
import onnx.shape_inference
import onnx.utils
from onnx.backend.base import Backend, Device, DeviceType, namedtupledict

from caffe2.python.onnx.workspace import Workspace
from caffe2.python.onnx.backend_rep import Caffe2Rep

import caffe2.python._import_c_extension as C

import warnings

def force_unicode(s):
    """Decode s as UTF-8 if it supports .decode (e.g. bytes); otherwise
    return it unchanged."""
    try:
        return s.decode('utf-8')
    except AttributeError:
        return s
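
# For example: force_unicode(b'foo') and force_unicode(u'foo') both
# return u'foo'.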

def get_device_option(device):
    m = {DeviceType.CPU: caffe2_pb2.CPU,
         DeviceType.CUDA: workspace.GpuDeviceType}
    return core.DeviceOption(m[device.type], device.device_id)
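
# For example (a sketch; Device, from onnx.backend.base, parses strings
# such as "CUDA:1"):
#
#   get_device_option(Device("CUDA:1"))
#
# returns a caffe2 DeviceOption with device_type == workspace.GpuDeviceType
# and device_id == 1.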


class OnnxAttributes(dict):
    """
    This is a more convenient way to work with ONNX/Caffe2 attributes
    that is not the protobuf representation.
    """
    @staticmethod
    def from_onnx(args):
        d = OnnxAttributes()
        for arg in args:
            d[arg.name] = convertAttributeProto(arg)
        return d

    def caffe2(self, kmap=lambda k: k):
        for k, v in self.items():
            if kmap(k) != '':
                yield caffe2.python.utils.MakeArgument(kmap(k), v)
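
# For example (a sketch):
#
#   attrs = OnnxAttributes.from_onnx(
#       onnx.helper.make_node('Concat', ['x'], ['y'], axis=1).attribute)
#   attrs['axis']          # -> 1
#   list(attrs.caffe2())   # -> [caffe2 Argument with name 'axis' and i == 1]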

# TODO: Move this into ONNX main library
def convertAttributeProto(onnx_arg):
    """
    Convert an ONNX AttributeProto into an appropriate Python object
    for the type.

    NB: Tensor attribute gets returned as the straight proto.
    """
    if onnx_arg.HasField('f'):
        return onnx_arg.f
    elif onnx_arg.HasField('i'):
        return onnx_arg.i
    elif onnx_arg.HasField('s'):
        return onnx_arg.s
    elif onnx_arg.HasField('t'):
        return onnx_arg.t  # this is a proto!
    elif onnx_arg.HasField('g'):
        return Caffe2Backend._graph_to_net(onnx_arg.g, Caffe2Backend._known_opset_version)
    elif len(onnx_arg.floats):
        return list(onnx_arg.floats)
    elif len(onnx_arg.ints):
        return list(onnx_arg.ints)
    elif len(onnx_arg.strings):
        return list(onnx_arg.strings)
    elif len(onnx_arg.graphs):
        retval = []
        # TODO: this doesn't work with RNN ops
        for g in onnx_arg.graphs:
            retval.append(Caffe2Backend._graph_to_net(g, Caffe2Backend._known_opset_version))
        return retval
    else:
        raise ValueError("Unsupported ONNX attribute: {}".format(onnx_arg))
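
# For example (a sketch): converting the attribute built by
#
#   onnx.helper.make_attribute('kernel_shape', [3, 3])
#
# returns the plain Python list [3, 3], whereas a tensor-valued attribute
# comes back as the TensorProto itself.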


# TODO: Move this into ONNX main library
class OnnxNode(object):
    """
    Reimplementation of NodeProto from ONNX, but in a form
    more convenient to work with from Python.

    We may temporarily edit these nodes to get them into Caffe2 form
    before actually translating them into the Caffe2 protobuf, since this
    is easier than decomposing everything and putting it back together
    when we're ready.
    """
    def __init__(self, node):
        self.name = str(node.name)
        self.op_type = str(node.op_type)
        self.attrs = OnnxAttributes.from_onnx(node.attribute)
        self.inputs = list(node.input)
        self.outputs = list(node.output)
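
# For example (a sketch):
#
#   n = OnnxNode(onnx.helper.make_node('Relu', ['x'], ['y']))
#   n.op_type   # -> 'Relu'
#   n.inputs    # -> ['x']
#   n.outputs   # -> ['y']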


Caffe2Ops = collections.namedtuple('Caffe2Ops', ['ops', 'init_ops', 'interface_blobs'])


class Caffe2Backend(Backend):

    # The greatest version of the ONNX operator set which we are aware of.
    # Models whose version is larger than this will cause us to emit a warning
    # that we are attempting to translate on a "best effort" basis.
    #
    # If you increase this, make SURE you cross-reference all BC-breaking
    # changes from one version to the next, and mark any that you did not
    # implement as broken in _broken_operators.
    _known_opset_version = 9

    # This dictionary will record operators which are KNOWN to be
    # broken, so we give a good error message rather than do something
    # bogus and then fail.
    _broken_operators = {
        # 'BrokenOp': version_it_was_broken_in
    }

    # Operators that differ between Caffe2 and ONNX only in name.
    # In most cases this should be empty, since ONNX aims to unify
    # operator definitions.
    _renamed_operators = {
        'GlobalMaxPool':         'MaxPool',
        'GlobalAveragePool':     'AveragePool',
        'Pad':                   'PadImage',
        'Neg':                   'Negative',
        'BatchNormalization':    'SpatialBN',
        'InstanceNormalization': 'InstanceNorm',
        'MatMul':                'BatchMatMul',
        'Upsample':              'ResizeNearest',
        'Identity':              'Copy',
        'Equal':                 'EQ',
        'Less':                  'LT',
        'Greater':               'GT',
        'Unsqueeze':             'ExpandDims',
        'Loop':                  'ONNXWhile',
        'Tile':                  'NumpyTile',
        'RandomNormal':          'GaussianFill',
        'RandomUniform':         'UniformFill',
    }

    _global_renamed_attrs = {'kernel_shape': 'kernels'}
    _per_op_renamed_attrs = {
        'Squeeze':              {'axes': 'dims'},
        'Unsqueeze':            {'axes': 'dims'},
        'Transpose':            {'perm': 'axes'},
        'Upsample':             {'mode': '',
                                 'scales': ''},
        'ConvTranspose':        {'output_padding': 'adjs'},
        'Selu':                 {'gamma': 'scale'},
        'If':                   {'then_branch': 'then_net',
                                 'else_branch': 'else_net'},
        'RandomUniform':        {'low': 'min',
                                 'high': 'max'}
    }

    # Operators whose behavior differs beyond renaming.
    # The value is the name of an attribute of this class, which is a
    # function from an ONNX node_def to a Caffe2 op_def.
    _special_operators = {
        'LSTM': '_create_rnn_variant',
        'GRU': '_create_rnn_variant',
        'RNN': '_create_rnn_variant',
        'Loop': '_create_loop',
        'If': '_create_if',
        'Upsample': '_create_upsample',
        'RandomNormal': '_create_gaussian_fill'
    }

    # Dummy name generator
    _dummy_name = C.DummyName()

    @classmethod
    def dummy_name(cls):
        return cls._dummy_name.new_dummy_name()

    # NB: By default you will get the LATEST definition of the operator,
    # so this interface MAY make BC-breaking changes.  Specify an
    # opset_version if you don't want this behavior to change under you.
    @classmethod
    def run_node(cls, node, inputs, device='CPU', opset_version=_known_opset_version, outputs_info=None):
        super(Caffe2Backend, cls).run_node(node, inputs, device=device,
                                           outputs_info=outputs_info, opset_version=opset_version)

        value_infos = []
        device_option = get_device_option(Device(device))
        ws = Workspace()
        with core.DeviceScope(device_option):  # temporary!
            if isinstance(inputs, dict):
                for key, value in inputs.items():
                    ws.FeedBlob(key, value)
                    value_infos.append(onnx.helper.make_tensor_value_info(
                        name=key,
                        elem_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype],
                        shape=value.shape).SerializeToString())
            else:
                assert len(node.input) == len(inputs), "{}: expected {} but got {}".format(
                    node.op_type, len(node.input), len(inputs))
                for key, value in zip(node.input, inputs):
                    ws.FeedBlob(key, value)
                    value_infos.append(onnx.helper.make_tensor_value_info(
                        name=key,
                        elem_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype],
                        shape=value.shape).SerializeToString())

            ops = []
            cbackend = C.Caffe2Backend(cls._dummy_name)
            ops_str = cbackend.convert_node(node.SerializeToString(), value_infos, opset_version)
            for s in ops_str[0] + ops_str[1]:
                op = caffe2_pb2.OperatorDef()
                op.ParseFromString(s)
                op.device_option.CopyFrom(device_option)
                ops.append(op)
            ws.RunOperatorsOnce(ops)
            output_values = [ws.FetchBlob(name) for name in node.output]
            return namedtupledict('Outputs', node.output)(*output_values)
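
    # For example (a sketch):
    #
    #   node = onnx.helper.make_node('Add', ['a', 'b'], ['c'])
    #   a = np.random.randn(4).astype(np.float32)
    #   b = np.random.randn(4).astype(np.float32)
    #   out = Caffe2Backend.run_node(node, [a, b])
    #   np.testing.assert_allclose(out.c, a + b)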

    @classmethod
    def _create_tensor_filling_op(cls, onnx_tensor, name=None):
        """
        Given an Onnx TensorProto, translate it into a Caffe2 operator
        which produces the given tensor filling op.
        """
        assert name or onnx_tensor.name
        name = name or onnx_tensor.name

        c2_op = caffe2_pb2.OperatorDef()

        c2_values = c2_op.arg.add()
        c2_values.name = "values"

        def tensor2list(onnx_tensor):
            # Use the onnx.numpy_helper because the data may be raw
            return onnx.numpy_helper.to_array(onnx_tensor).flatten().tolist()

        if onnx_tensor.data_type in [TensorProto.FLOAT]:
            c2_op.type = 'GivenTensorFill'
            c2_values.floats.extend(tensor2list(onnx_tensor))
        elif onnx_tensor.data_type in [TensorProto.DOUBLE]:
            c2_op.type = 'GivenTensorDoubleFill'
            c2_values.floats.extend(tensor2list(onnx_tensor))
        elif onnx_tensor.data_type in [TensorProto.INT64,
                                       TensorProto.UINT32]:
            c2_op.type = 'GivenTensorInt64Fill'
            c2_values.ints.extend(tensor2list(onnx_tensor))
        elif onnx_tensor.data_type in [TensorProto.UINT8,
                                       TensorProto.INT8,
                                       TensorProto.UINT16,
                                       TensorProto.INT16,
                                       TensorProto.INT32]:
            c2_op.type = 'GivenTensorIntFill'
            c2_values.ints.extend(tensor2list(onnx_tensor))
        elif onnx_tensor.data_type == TensorProto.BOOL:
            c2_op.type = 'GivenTensorBoolFill'
            c2_values.ints.extend(tensor2list(onnx_tensor))
        elif onnx_tensor.data_type == TensorProto.STRING:
            c2_op.type = 'GivenTensorStringFill'
            c2_values.strings.extend(onnx_tensor.string_data)
        else:
            raise RuntimeError(
                "unrecognized tensor type {}".format(onnx_tensor.data_type))

        c2_shape = c2_op.arg.add()
        c2_shape.name = "shape"
        c2_shape.ints.extend(onnx_tensor.dims)

        c2_op.output.append(name)

        return c2_op
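
    # For example (a sketch): for the TensorProto built by
    #
    #   onnx.numpy_helper.from_array(
    #       np.array([[1., 2.], [3., 4.]], dtype=np.float32), name='w')
    #
    # this returns a GivenTensorFill op with output 'w',
    # arg 'values' == [1.0, 2.0, 3.0, 4.0] and arg 'shape' == [2, 2].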

    @classmethod
    def _rnn_reform_weights(cls, reforms, name, hidden_size, init_net, gates, reorder_indices):
        # Slice each concatenated ONNX weight/bias blob into per-gate
        # blobs (hidden_size rows per gate) and, when do_concat is set,
        # re-concatenate them in the gate order Caffe2 expects.
        for name_from, name_to, do_concat, extra_dims in reforms:
            gate_blobs = ['%s/%s_%s' % (name, prefix, name_to) for prefix in gates]
            for i, x in enumerate(gate_blobs):
                dim0 = i * hidden_size, (i+1) * hidden_size
                starts, ends = zip(dim0, *extra_dims)
                init_net.Slice(name_from, x, starts=starts, ends=ends)
            if do_concat:
                reordered_gate_blobs = [gate_blobs[i] for i in reorder_indices]
                init_net.Concat(reordered_gate_blobs, ['%s/%s' % (name, name_to), cls.dummy_name()], axis=0)

    @classmethod
    def _make_rnn_direction(cls, input_blob, B, W, R, initial_states_and_names, sequence_lens,
                            pred_mh, init_net,
                            input_size, hidden_size, num_gates, direction_offset,
                            Bi, Br, W_, R_,
                            reform, make_cell, keep_outputs):
        name = cls.dummy_name()

        # input and recurrence biases are squashed together in onnx
        # but not in caffe2
        gates_hidden_size = num_gates * hidden_size
        bias_offset = 2 * direction_offset * gates_hidden_size
        weight_offset = direction_offset * gates_hidden_size
        Bi = init_net.Slice(B, name + Bi,
                            starts=[bias_offset + 0 * gates_hidden_size],
                            ends  =[bias_offset + 1 * gates_hidden_size])
        Br = init_net.Slice(B, name + Br,
                            starts=[bias_offset + 1 * gates_hidden_size],
                            ends  =[bias_offset + 2 * gates_hidden_size])
        W_ = init_net.Slice(W, name + W_,
                            starts=[weight_offset + 0 * gates_hidden_size, 0],
                            ends  =[weight_offset + 1 * gates_hidden_size,-1])
        R_ = init_net.Slice(R, name + R_,
                            starts=[weight_offset + 0 * gates_hidden_size, 0],
                            ends  =[weight_offset + 1 * gates_hidden_size,-1])

        initial_states_sliced = []
        for initial_state, name_suffix in initial_states_and_names:
            initial_states_sliced.append(
                pred_mh.net.Slice(initial_state, name + name_suffix,
                                  starts=[direction_offset + 0, 0, 0],
                                  ends  =[direction_offset + 1,-1,-1]))

        if direction_offset == 1:
            if sequence_lens is not None:
                seq_lens_for_reverse = sequence_lens
            else: