// onnxsim / onnxsim.h
#pragma once

#include <memory>
#include <optional>
#include <stdexcept>
#include <string>
#include <vector>

#include <onnx/onnx_pb.h>

// Abstract interface for executing an ONNX model on a list of input tensors.
// One process-wide implementation is registered through set_instance() and
// then invoked via the static Run() entry point.
struct ModelExecutor {
  virtual ~ModelExecutor() = default;

  // Register the process-wide executor that Run() will delegate to.
  static void set_instance(std::shared_ptr<const ModelExecutor> instance) {
    instance_ = std::move(instance);
  }

  // Run `model` on `inputs` through the registered executor.
  // Throws std::runtime_error when no executor has been registered yet.
  static std::vector<onnx::TensorProto> Run(
      const onnx::ModelProto& model,
      const std::vector<onnx::TensorProto>& inputs) {
    if (!instance_) {
      throw std::runtime_error("empty instance");
    }
    return instance_->_Run(model, inputs);
  }

  // Implementation hook. Kept public (despite the leading underscore) so the
  // pybind11 trampoline can override it from Python.
  virtual std::vector<onnx::TensorProto> _Run(
      const onnx::ModelProto& model,
      const std::vector<onnx::TensorProto>& inputs) const = 0;

 private:
  // Process-wide executor; defined in the corresponding .cpp.
  static std::shared_ptr<const ModelExecutor> instance_;
};

// Initialize global state required before calling Simplify()/SimplifyPath().
// NOTE(review): implementation not visible in this header — presumably ONNX /
// optimizer environment setup; confirm in the .cpp before relying on details.
void InitEnv();

// Simplify `model` and return the simplified copy (the input is not modified).
// @param model                  the ONNX model to simplify.
// @param skip_optimizers        optional list of optimizer pass names to skip;
//                               presumably std::nullopt means "skip none" —
//                               confirm against the implementation.
// @param constant_folding       enable constant folding when true.
// @param shape_inference        enable shape inference when true.
// @param tensor_size_threshold  size threshold applied to tensors; exact
//                               semantics defined in the implementation.
onnx::ModelProto Simplify(
    const onnx::ModelProto& model,
    std::optional<std::vector<std::string>> skip_optimizers,
    bool constant_folding, bool shape_inference, size_t tensor_size_threshold);

// File-based variant of Simplify(): reads the model from `in_path` and writes
// the simplified model to `out_path`. Remaining parameters match Simplify().
void SimplifyPath(const std::string& in_path, const std::string& out_path,
                  std::optional<std::vector<std::string>> skip_optimizers,
                  bool constant_folding, bool shape_inference,
                  size_t tensor_size_threshold);