# neilisaac / torch: onnx/utils.py

r"""
The torch.onnx module contains functions to export models into the ONNX
IR format.  These models can be loaded with the ONNX library and then
converted to models which run on other deep learning frameworks.
"""

import torch
import torch.jit
import torch.autograd
import torch.serialization
import re
from torch._six import container_abcs
import contextlib
import numbers
import warnings
from torch._six import string_classes
from torch.jit import _unique_state_dict
from torch.onnx import ONNX_ARCHIVE_MODEL_PROTO_NAME, ExportTypes, OperatorExportTypes, TrainingMode
from torch._C import ListType, OptionalType, _propagate_and_assign_input_shapes, _check_onnx_proto
from typing import Union, Tuple, List


# Flag indicating whether we are currently in the middle of an ONNX export.
__IN_ONNX_EXPORT = False


def is_in_onnx_export():
    global __IN_ONNX_EXPORT
    return __IN_ONNX_EXPORT
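
# Sketch of how user code typically consults this flag; the forward() below is
# hypothetical and only illustrates branching on the export state:
#
#     def forward(self, x):
#         if torch.onnx.is_in_onnx_export():
#             # take an ONNX-friendly path while the exporter is tracing
#             return x.clamp(min=0)
#         return torch.nn.functional.relu(x)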

# Skip the type check because IValue cannot be imported from torch._C.
_params_dict = {}  # type: ignore

@contextlib.contextmanager
def select_model_mode_for_export(model, mode):
    if not isinstance(model, torch.jit.ScriptFunction):
        is_originally_training = model.training

        if mode is None:
            mode = TrainingMode.EVAL
            # if the model is in training mode but the user did not specify
            # to export the model in training mode, export the model in inference
            # mode (default) and warn them
            if is_originally_training:
                warnings.warn("You are exporting the model to ONNX while in training mode with "
                              "'train' parameter not specified. The model will default to inference mode export. "
                              "If you wish to export a training amenable ONNX model, specify training=TrainingMode.TRAINING or "
                              "training=TrainingMode.PRESERVE (to preserve the original model state) in torch.onnx.export().")

        # if mode == TrainingMode.EVAL or (mode == TrainingMode.PRESERVE and not is_originally_training) => is_export_training = False
        is_export_training = False
        # ONNX opset 12 has better support for training amenable models, with updated
        # versions of the dropout and batch_norm operators
        if mode == TrainingMode.TRAINING or (mode == TrainingMode.PRESERVE and is_originally_training):
            from torch.onnx.symbolic_helper import _export_onnx_opset_version
            if _export_onnx_opset_version < 12:
                warnings.warn("You are exporting the model in training mode with onnx opset version {}. "
                              "Opset versions lower than opset 12 will not be able to export nodes such as"
                              "Dropout and BatchNorm correctly.".format(_export_onnx_opset_version))
            is_export_training = True

        from torch.onnx.symbolic_helper import _set_training_mode
        _set_training_mode(is_export_training)
        model.train(is_export_training)
    try:
        yield
    finally:
        if not isinstance(model, torch.jit.ScriptFunction):
            model.train(is_originally_training)
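
# select_model_mode_for_export() is driven by the ``training`` argument of
# torch.onnx.export(). A sketch of requesting a training-amenable export
# (model and input are placeholders; opset 12 or higher is recommended per the warning above):
#
#     torch.onnx.export(model, dummy_input, "model.onnx",
#                       training=torch.onnx.TrainingMode.TRAINING,
#                       opset_version=12)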


def export(model, args, f, export_params=True, verbose=False, training=None,
           input_names=None, output_names=None, aten=False, export_raw_ir=False,
           operator_export_type=None, opset_version=None, _retain_param_name=True,
           do_constant_folding=True, example_outputs=None, strip_doc_string=True,
           dynamic_axes=None, keep_initializers_as_inputs=None, custom_opsets=None,
           enable_onnx_checker=True, use_external_data_format=False):
    if aten or export_raw_ir:
        assert operator_export_type is None
        assert aten ^ export_raw_ir
        operator_export_type = OperatorExportTypes.ONNX_ATEN if aten else OperatorExportTypes.RAW
    elif operator_export_type is None:
        if torch.onnx.PYTORCH_ONNX_CAFFE2_BUNDLE:
            operator_export_type = OperatorExportTypes.ONNX_ATEN_FALLBACK
        else:
            operator_export_type = OperatorExportTypes.ONNX
    _export(model, args, f, export_params, verbose, training, input_names, output_names,
            operator_export_type=operator_export_type, opset_version=opset_version,
            _retain_param_name=_retain_param_name, do_constant_folding=do_constant_folding,
            example_outputs=example_outputs, strip_doc_string=strip_doc_string,
            dynamic_axes=dynamic_axes, keep_initializers_as_inputs=keep_initializers_as_inputs,
            custom_opsets=custom_opsets, enable_onnx_checker=enable_onnx_checker,
            use_external_data_format=use_external_data_format)
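
# The ``aten`` and ``export_raw_ir`` flags above are convenience switches; the same
# behavior can be requested directly through ``operator_export_type``. A sketch
# (model and input are placeholders):
#
#     torch.onnx.export(model, dummy_input, "model.onnx",
#                       operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)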


def _is_constant_tensor_list(node):
    if node.kind() != "prim::Constant":
        return False
    output_type = node.output().type()
    if output_type.isSubtypeOf(ListType.ofTensors()):
        return True
    if output_type.isSubtypeOf(ListType(OptionalType.ofTensor())):
        return True
    return False

# ONNX can't handle constants that are lists of tensors, which can
# get generated in constant prop. So we split them back into prim::ListConstructs
def _split_tensor_list_constants(g, block):
    for node in block.nodes():
        for subblock in node.blocks():
            _split_tensor_list_constants(g, subblock)
        if _is_constant_tensor_list(node):
            inputs = []
            for val in node.output().toIValue():
                input = g.insertConstant(val)
                input.node().moveBefore(node)
                inputs.append(input)

            lc = (g.create("prim::ListConstruct", inputs)
                  .insertBefore(node)
                  .output()
                  .setType(ListType.ofTensors()))
            node.output().replaceAllUsesWith(lc)


def _optimize_graph(graph, operator_export_type, _disable_torch_constant_prop=False, fixed_batch_size=False,
                    params_dict=None, use_new_jit_passes=True, dynamic_axes=None, input_names=None, module=None):
    # Inline everything
    torch._C._jit_pass_inline(graph)

    # Remove fork/wait nodes
    torch._C._jit_pass_inline_fork_wait(graph)
    torch._C._jit_pass_lint(graph)

    if use_new_jit_passes:
        torch._C._jit_pass_lower_all_tuples(graph)
        torch._C._jit_pass_onnx_remove_inplace_ops_for_onnx(graph, module)
    else:
        torch._C._jit_pass_remove_inplace_ops(graph)

    # We now record some ops like ones/zeros into a trace where we previously
    # recorded constants. Use constant prop to maintain our current level of
    # ONNX support without implementing symbolics for all of them.
    if _disable_torch_constant_prop is False:
        torch._C._jit_pass_constant_propagation(graph)
    _split_tensor_list_constants(graph, graph)
    # run dce to eliminate dead parts of the graph that might have been
    # left behind by things like symbolic_override
    torch._C._jit_pass_dce(graph)
    torch._C._jit_pass_lint(graph)

    torch._C._jit_pass_canonicalize_graph_fuser_ops(graph)
    torch._C._jit_pass_lint(graph)

    torch._C._jit_pass_peephole(graph, True)
    torch._C._jit_pass_fuse_addmm(graph)
    torch._C._jit_pass_lint(graph)

    if operator_export_type != OperatorExportTypes.RAW:
        torch._C._jit_pass_peephole(graph, True)
        torch._C._jit_pass_lower_all_tuples(graph)
        # in _jit_pass_onnx, symbolic functions are called for each node for conversion.
        # However, there are nodes that cannot be converted without additional context.
        # For example, the number of outputs from split (and whether it is static or dynamic) is unknown
        # until the point where it is unpacked by listUnpack node.
        # This pass does a preprocess, and prepares the nodes such that enough context can be received
        # by the symbolic function.
        torch._C._jit_pass_onnx_preprocess(graph)
        # _prepare_inplace_ops makes the IR invalid for JIT passes / alias db
        torch._C._jit_pass_onnx_prepare_inplace_ops_for_onnx(graph)

        # onnx does not support tuples, so try to remove them
        torch._C._jit_pass_lint(graph)

        # onnx only supports tensors, but 1 / 2 = 0.5 and tensor(1) / tensor(2) = 0
        torch._C._jit_pass_prepare_division_for_onnx(graph)

        torch._C._jit_pass_onnx_remove_print(graph)
        torch._C._jit_pass_onnx_preprocess_caffe2(graph)

        if operator_export_type == OperatorExportTypes.ONNX_ATEN_FALLBACK:
            torch.onnx.symbolic_helper._quantized_ops.clear()
            # Unpack quantized weights for conv and linear ops and insert into graph.
            torch._C._jit_pass_onnx_unpack_quantized_weights(graph, params_dict)
            # Insert permutes before and after each conv op to ensure correct order.
            torch._C._jit_pass_onnx_quantization_insert_permutes(graph, params_dict)

            # Find consecutive permutes that are no-ops and remove them.
            torch._C._jit_pass_custom_pattern_based_rewrite_graph("""
            graph(%Pi):
                %Pq = quantized::nhwc2nchw(%Pi)
                %Pr = quantized::nchw2nhwc(%Pq)
                return (%Pr)""", """
            graph(%Ri):
                return (%Ri)""", graph)

        # onnx only supports tensors, so we turn all number types into tensors
        torch._C._jit_pass_erase_number_types(graph)

        from torch.onnx.symbolic_helper import _onnx_shape_inference
        if _onnx_shape_inference:
            input_names = [] if input_names is None else input_names
            dynamic_axes = {} if dynamic_axes is None else dynamic_axes
            torch._C._jit_pass_onnx_set_dynamic_input_shape(graph, dynamic_axes, input_names)
        graph = torch._C._jit_pass_onnx(graph, operator_export_type)
        torch._C._jit_pass_lint(graph)

        torch._C._jit_pass_onnx_scalar_type_analysis(graph)
        torch._C._jit_pass_lint(graph)

        if dynamic_axes is None or not bool(dynamic_axes):
            torch._C._jit_pass_onnx_fold_if(graph)

        from torch.onnx.symbolic_helper import _export_onnx_opset_version
        torch._C._jit_pass_onnx_peephole(graph, _export_onnx_opset_version, fixed_batch_size)
        torch._C._jit_pass_lint(graph)

    # graph is not a valid jit graph anymore because types have been replaced
    # (e.g. int with Tensor), so it now contains operators that don't actually
    # exist. We can't run normal dead code elimination because it'd fail trying
    # to look up if an operator has side effects, but we can run a dead code
    # elimination variant that doesn't need to look up if an op has side effects.
    torch._C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)
    torch._C._jit_pass_lint(graph)
    graph = torch._C._jit_pass_canonicalize(graph)
    torch._C._jit_pass_lint(graph)
    from torch.onnx.symbolic_helper import _onnx_shape_inference, _export_onnx_opset_version
    if _onnx_shape_inference:
        torch._C._jit_pass_onnx_graph_shape_type_inference(graph, params_dict, _export_onnx_opset_version)
    return graph


# We accept dictionaries and strings as ONNX inputs, but they should only be
# used for configuration. Here we detect whether these inputs are modified,
# and if so we warn the user that the changes won't take effect in the traced
# ONNX graph.
def warn_on_static_input_change(input_states):
    for input, traced_input in zip(input_states[0], input_states[1]):
        if isinstance(input, dict):
            if list(input.keys()) != list(traced_input.keys()):
                warning = "We detected that you are modifying a dictionnary that is an input to your " \
                          "model. " \
                          "Note that dictionaries are allowed as inputs in ONNX but they should be " \
                          "handled with care. " \
                          "Usages of dictionaries is not recommended, and should not be used except " \
                          "for configuration use. " \
                          "Also note that the order and values of the keys must remain the same. "
                warnings.warn(warning)
        elif isinstance(input, str):
            if input != traced_input:
                warning = "The model seems to have string inputs/outputs. " \
                          "Note that strings will not appear as inputs/outputs of the ONNX graph. "
                warnings.warn(warning)
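
# Sketch of the situation this warning targets (names are illustrative): a dict
# passed as a model input is mutated inside forward(), e.g.
#
#     config = {"use_cache": True}
#     torch.onnx.export(model, (x, config), "model.onnx")
#
# If forward() adds, removes, or reorders keys of ``config``, the mutation only
# happens while tracing and is not captured in the exported graph.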


def _resolve_args_by_export_type(arg_name, arg_value, operator_export_type):
    # This helper method resolves arguments that are ignored when operator_export_type is not OperatorExportTypes.ONNX.
    if operator_export_type is not operator_export_type.ONNX:
        if arg_value is True:
            warnings.warn("`{}' can be set to True only when 'operator_export_type' is "
                          "`ONNX`. Since 'operator_export_type' is not set to 'ONNX', "
                          "`{}` argument will be ignored.".format(arg_name, arg_name))
        arg_value = False
    return arg_value


def _decide_keep_init_as_input(keep_initializers_as_inputs, operator_export_type,
                               opset_version):
    # This method encapsulates the logic to decide whether the initializers in the graph
    # should be listed as ONNX graph inputs (i.e., whether to choose ONNX IR v3 or v4).
    # If keep_initializers_as_inputs is not specified (None), then we decide whether to keep
    # initializers as graph inputs (val_keep_init_as_ip) based on export type. If export type
    # is ONNX, then do not keep initializers as input (val_keep_init_as_ip=False). For all other
    # export types, keep initializers as input (val_keep_init_as_ip=True).
    # If keep_initializers_as_inputs is specified, then respect it, unless opset version <= 8,
    # in which case it must be ignored because for opset version <= 8 all initializers MUST be
    # part of graph input (only ONNX IR v3 is allowed), i.e. val_keep_init_as_ip=True.

    # Special handling is needed for opset version 8 or lower, because irrespective
    # of user input for keep_initializers_as_inputs, the graph must follow ONNX IR v3
    # semantics, i.e. all initializers must be listed as ONNX graph inputs.
    if opset_version < 9:
        if keep_initializers_as_inputs is False:
            warnings.warn("Setting 'keep_initializers_as_inputs=False' for opset version"
                          "8 or lower would lead to an invalid ONNX graph. Therefore, "
                          "'keep_initializers_as_inputs=False' is ignored during export."
                          "Exported model will have initialiers as graph inputs (compliant "
                          " to ONNX IR v3).")
        return True  # i.e. True == initializers are part of graph input (ONNX IR v3)
    val_keep_init_as_ip = True if keep_initializers_as_inputs is None else keep_initializers_as_inputs
    if keep_initializers_as_inputs is None and operator_export_type is OperatorExportTypes.ONNX:
        val_keep_init_as_ip = False
    return val_keep_init_as_ip
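
# From the user's side this decision is controlled by the
# ``keep_initializers_as_inputs`` argument of torch.onnx.export(). A sketch of
# forcing ONNX IR v3-style graphs, i.e. initializers listed as graph inputs
# (model and input are placeholders):
#
#     torch.onnx.export(model, dummy_input, "model.onnx",
#                       opset_version=11,
#                       keep_initializers_as_inputs=True)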


def _decide_add_node_names(add_node_names, operator_export_type):
    return _resolve_args_by_export_type("add_node_names", add_node_names, operator_export_type)


def _decide_constant_folding(do_constant_folding, operator_export_type, training):
    do_constant_folding = _resolve_args_by_export_type("do_constant_folding", do_constant_folding, operator_export_type)
    if do_constant_folding and (training is not None and training is not TrainingMode.EVAL):
        warnings.warn("It is recommended that constant folding be turned off ('do_constant_folding=False') "
                      "when exporting the model in training-amenable mode, i.e. with 'training=TrainingMode.TRAIN' "
                      "or 'training=TrainingMode.PRESERVE' (when model is in training mode). Otherwise, some "
                      "learnable model parameters may not translate correctly in the exported ONNX model "
                      "because constant folding mutates model parameters. Please consider "
                      "turning off constant folding or setting the training=TrainingMode.EVAL.")
    return do_constant_folding
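
# Sketch of the combination recommended by the warning above when exporting a
# training-amenable model (model and input are placeholders):
#
#     torch.onnx.export(model, dummy_input, "model.onnx",
#                       training=torch.onnx.TrainingMode.TRAINING,
#                       do_constant_folding=False)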


def _decide_external_data_format(use_external_data_format, operator_export_type, f):
    val_use_external_data_format = _resolve_args_by_export_type("use_external_data_format",
                                                                use_external_data_format,
                                                                operator_export_type)
    # f can be a non-string in the regular-sized model export case, but for large model export f must be
    # a non-empty string specifying the location of the model. For large model cases, if f is not a
    # non-empty string, then this method returns an empty string, which is an error condition for the
    # large model export code path later on (but not for the regular model export code path).
    model_file_location = f if val_use_external_data_format and isinstance(f, str) else str()
    return val_use_external_data_format, model_file_location
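
# For very large models (roughly >2GB of parameters), ``f`` is expected to be a
# file path string so that weights can be written as external data files next to
# the model. A sketch (path and model are placeholders):
#
#     torch.onnx.export(big_model, dummy_input, "/tmp/big_model.onnx",
#                       use_external_data_format=True)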

def _decide_input_format(model, args):
    import inspect
    try:
        sig = inspect.signature(model.forward)
        ordered_list_keys = list(sig.parameters.keys())
        if isinstance(args[-1], dict):
            args_dict = args[-1]
            args = list(args)[:-1]
            n_nonkeyword = len(args)
            for optional_arg in ordered_list_keys[n_nonkeyword:]:
                if optional_arg in args_dict:
                    args.append(args_dict[optional_arg])
                # Check if this arg has a default value
                else:
                    param = sig.parameters[optional_arg]
                    if param.default is param.empty:
                        args.append(None)
                    else:
                        args.append(param.default)
            args = tuple(args)
        return args
Loading ...