neilisaac / torch (Python package), version 1.8.0
include/caffe2/operators/feature_maps_ops.h

#ifndef CAFFE2_OPERATORS_FEATURE_MAPS_OPS_H_
#define CAFFE2_OPERATORS_FEATURE_MAPS_OPS_H_

#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"

namespace caffe2 {

// Converts a dense (numExamples x numFeatures) feature tensor, together with
// a presence bitmap of the same shape, into the multi-feature map
// representation:
//   Input(0):  in_values,   T,     dense feature values
//   Input(1):  in_presence, bool,  per-entry presence flags
//   Output(0): out_lengths, int32, number of present features per example
//   Output(1): out_keys,    int64, feature IDs from the "feature_ids" argument
//   Output(2): out_values,  T,     values of the present features
template <class Context>
class MergeDenseFeatureTensorsOp : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;

  template <class... Args>
  explicit MergeDenseFeatureTensorsOp(Args&&... args)
      : Operator<Context>(std::forward<Args>(args)...) {
    featureIDs_ = this->template GetRepeatedArgument<int64_t>("feature_ids");
  }
  virtual ~MergeDenseFeatureTensorsOp() noexcept {}

  bool RunOnDevice() override {
    return DispatchHelper<
        TensorTypes<bool, int32_t, int64_t, float, double, std::string>>::
        call(this, Input(0));
  }

  template <typename T>
  bool DoRunWithType() {
    auto& dense_data = Input(0);
    int numExamples = dense_data.size(0);
    int numFeatures = dense_data.size(1);

    const bool* inPresenceData = Input(1).template data<bool>();
    int totalNumFeatures = 0;
    for (int exampleIndex = 0; exampleIndex < numExamples; ++exampleIndex) {
      for (int inputIndex = 0; inputIndex < numFeatures; ++inputIndex) {
        if (inPresenceData[exampleIndex * numFeatures + inputIndex]) {
          ++totalNumFeatures;
        }
      }
    }

    auto* outLengths = Output(0, {numExamples}, at::dtype<int32_t>());
    auto* outKeys = Output(1, {totalNumFeatures}, at::dtype<int64_t>());
    auto* outValues = Output(2, {totalNumFeatures}, at::dtype<T>());

    int32_t* outLengthsData = outLengths->template mutable_data<int32_t>();
    int64_t* outKeysData = outKeys->template mutable_data<int64_t>();
    T* outValuesData = outValues->template mutable_data<T>();
    const T* inData = Input(0).template data<T>();

    int keysOffset = 0;
    for (int exampleIndex = 0; exampleIndex < numExamples; ++exampleIndex) {
      outLengthsData[exampleIndex] = 0;
      auto offset = exampleIndex * numFeatures;
      for (int inputIndex = 0; inputIndex < numFeatures; ++inputIndex) {
        if (inPresenceData[offset]) {
          ++outLengthsData[exampleIndex];
          outKeysData[keysOffset] = featureIDs_[inputIndex];
          outValuesData[keysOffset] = inData[offset];
          ++keysOffset;
        }
        offset++;
      }
    }
    return true;
  }

 private:
  std::vector<int64_t> featureIDs_;
};
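
// A small worked example for MergeDenseFeatureTensorsOp (illustrative values,
// not taken from the source): with feature_ids = {10, 11},
//   in_values   = [[1.5, 2.5], [3.5, 4.5]]
//   in_presence = [[true, false], [true, true]]
// the operator produces
//   out_lengths = [1, 2]
//   out_keys    = [10, 10, 11]
//   out_values  = [1.5, 3.5, 4.5]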

// Merges multiple single-scalar feature tensors into the multi-feature map
// representation. Inputs come in pairs, one pair per feature i:
//   Input(2*i):     in_values,   T,    one scalar per example
//   Input(2*i + 1): in_presence, bool, one presence flag per example
// Outputs are out_lengths (int32), out_keys (int64, from "feature_ids"), and
// out_values (T), as in MergeDenseFeatureTensorsOp.
template <class Context>
class MergeSingleScalarFeatureTensorsOp : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;

  template <class... Args>
  explicit MergeSingleScalarFeatureTensorsOp(Args&&... args)
      : Operator<Context>(std::forward<Args>(args)...) {
    numInputs_ = InputSize() / kNumTensorsPerInput;
    featureIDs_ = this->template GetRepeatedArgument<int64_t>("feature_ids");
  }
  virtual ~MergeSingleScalarFeatureTensorsOp() noexcept {}

  bool RunOnDevice() override {
    return DispatchHelper<
        TensorTypes<bool, int32_t, int64_t, float, double, std::string>>::
        call(this, Input(0));
  }

  template <typename T>
  bool DoRunWithType() {
    int numExamples = Input(0).numel();
    int totalNumFeatures = 0;
    for (int inputIndex = 0; inputIndex < numInputs_; ++inputIndex) {
      const bool* inPresenceData =
          Input(kNumTensorsPerInput * inputIndex + 1).template data<bool>();
      for (int exampleIndex = 0; exampleIndex < numExamples; ++exampleIndex) {
        if (inPresenceData[exampleIndex]) {
          ++totalNumFeatures;
        }
      }
    }

    auto* outLengths = Output(0, {numExamples}, at::dtype<int32_t>());
    auto* outKeys = Output(1, {totalNumFeatures}, at::dtype<int64_t>());
    auto* outValues = Output(2, {totalNumFeatures}, at::dtype<T>());

    int32_t* outLengthsData = outLengths->template mutable_data<int32_t>();
    int64_t* outKeysData = outKeys->template mutable_data<int64_t>();
    T* outValuesData = outValues->template mutable_data<T>();

    int keysOffset = 0;
    for (int exampleIndex = 0; exampleIndex < numExamples; ++exampleIndex) {
      outLengthsData[exampleIndex] = 0;
      for (int inputIndex = 0; inputIndex < numInputs_; ++inputIndex) {
        const T* inData =
            Input(kNumTensorsPerInput * inputIndex).template data<T>();
        const bool* inPresenceData =
            Input(kNumTensorsPerInput * inputIndex + 1).template data<bool>();
        if (inPresenceData[exampleIndex]) {
          ++outLengthsData[exampleIndex];
          outKeysData[keysOffset] = featureIDs_[inputIndex];
          outValuesData[keysOffset] = inData[exampleIndex];
          ++keysOffset;
        }
      }
    }
    return true;
  }

 private:
  const int kNumTensorsPerInput = 2;
  int numInputs_;
  std::vector<int64_t> featureIDs_;
};
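
// A worked example for MergeSingleScalarFeatureTensorsOp (illustrative
// values): with feature_ids = {10, 11} and two examples,
//   input 0: in_values = [1.5, 3.5], in_presence = [true, false]
//   input 1: in_values = [2.5, 4.5], in_presence = [false, true]
// the operator produces
//   out_lengths = [1, 1]
//   out_keys    = [10, 11]
//   out_values  = [1.5, 4.5]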

// Gradient of MergeSingleScalarFeatureTensorsOp: scatters the merged values
// gradient (the last input) back into one dense gradient tensor per feature,
// guided by the original presence tensors. Positions where a feature was
// absent receive a default-constructed value (zero for numeric types).
template <class Context>
class MergeSingleScalarFeatureTensorsGradientOp : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;

  template <class... Args>
  explicit MergeSingleScalarFeatureTensorsGradientOp(Args&&... args)
      : Operator<Context>(std::forward<Args>(args)...) {
    numFeatureInputs_ = InputSize() - 1; // Everything other than values_grad
  }
  virtual ~MergeSingleScalarFeatureTensorsGradientOp() noexcept {}

  bool RunOnDevice() override {
    return DispatchHelper<
        TensorTypes<bool, int32_t, int64_t, float, double, std::string>>::
        call(this, Input(InputSize() - 1));
  }

  template <typename T>
  bool DoRunWithType() {
    int numExamples = Input(0).numel();
    for (int inputIndex = 0; inputIndex < numFeatureInputs_; ++inputIndex) {
      Output(inputIndex)->ResizeLike(Input(inputIndex));
    }

    const T* inValuesGradData = Input(InputSize() - 1).template data<T>();

    T default_value = T();
    int valuesOffset = 0;
    for (int exampleIndex = 0; exampleIndex < numExamples; ++exampleIndex) {
      for (int inputIndex = 0; inputIndex < numFeatureInputs_; ++inputIndex) {
        const bool* inPresenceData = Input(inputIndex).template data<bool>();
        T* outFeatureData = Output(inputIndex)->template mutable_data<T>();
        if (inPresenceData[exampleIndex]) {
          outFeatureData[exampleIndex] = inValuesGradData[valuesOffset];
          ++valuesOffset;
        } else {
          outFeatureData[exampleIndex] = default_value;
        }
      }
    }
    return true;
  }

 private:
  int numFeatureInputs_;
};
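
// Continuing the example above for the gradient op: with presence tensors
// [true, false] and [false, true] and values_grad = [g0, g1], the outputs are
// [g0, 0] for the first feature and [0, g1] for the second.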

// Merges multiple single-list feature tensors into the multi-feature map
// representation. Inputs come in triples, one triple per feature i:
//   Input(3*i):     in_lengths,  int32, list length per example
//   Input(3*i + 1): in_values,   T,     flattened list values
//   Input(3*i + 2): in_presence, bool,  presence flag per example
// Outputs: out_lengths (int32), out_keys (int64), out_values_lengths (int32),
// and out_values_values (T).
template <class Context>
class MergeSingleListFeatureTensorsOp : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;

  template <class... Args>
  explicit MergeSingleListFeatureTensorsOp(Args&&... args)
      : Operator<Context>(std::forward<Args>(args)...) {
    numInputs_ = InputSize() / kNumTensorsPerInput;
    inValuesOffset_.resize(numInputs_);
    featureIDs_ = this->template GetRepeatedArgument<int64_t>("feature_ids");
  }
  virtual ~MergeSingleListFeatureTensorsOp() noexcept {}

  bool RunOnDevice() override {
    return DispatchHelper<
        TensorTypes<bool, int32_t, int64_t, float, double, std::string>>::
        call(this, Input(1));
  }

  template <typename T>
  bool DoRunWithType() {
    int numExamples = Input(0).numel();
    int totalNumFeatures = 0;
    int totalNumValues = 0;
    for (int inputIndex = 0; inputIndex < numInputs_; ++inputIndex) {
      const int32_t* inLengthsData =
          Input(kNumTensorsPerInput * inputIndex).template data<int32_t>();
      const bool* inPresenceData =
          Input(kNumTensorsPerInput * inputIndex + 2).template data<bool>();
      for (int exampleIndex = 0; exampleIndex < numExamples; ++exampleIndex) {
        if (inPresenceData[exampleIndex]) {
          ++totalNumFeatures;
          totalNumValues += inLengthsData[exampleIndex];
        }
      }
    }

    auto* outLengths = Output(0, {numExamples}, at::dtype<int32_t>());
    auto* outKeys = Output(1, {totalNumFeatures}, at::dtype<int64_t>());
    auto* outValuesLengths =
        Output(2, {totalNumFeatures}, at::dtype<int32_t>());
    auto* outValuesValues = Output(3, {totalNumValues}, at::dtype<T>());

    int32_t* outLengthsData = outLengths->template mutable_data<int32_t>();
    int64_t* outKeysData = outKeys->template mutable_data<int64_t>();
    int32_t* outValuesLengthsData =
        outValuesLengths->template mutable_data<int32_t>();
    T* outValuesValuesData = outValuesValues->template mutable_data<T>();

    int keysOffset = 0;
    int valuesOffset = 0;
    for (int inputIndex = 0; inputIndex < numInputs_; ++inputIndex) {
      inValuesOffset_[inputIndex] = 0;
    }
    for (int exampleIndex = 0; exampleIndex < numExamples; ++exampleIndex) {
      outLengthsData[exampleIndex] = 0;
      for (int inputIndex = 0; inputIndex < numInputs_; ++inputIndex) {
        const int32_t* inLengthsData =
            Input(kNumTensorsPerInput * inputIndex).template data<int32_t>();
        const auto& inValues = Input(kNumTensorsPerInput * inputIndex + 1);
        const bool* inPresenceData =
            Input(kNumTensorsPerInput * inputIndex + 2).template data<bool>();
        if (inPresenceData[exampleIndex]) {
          ++outLengthsData[exampleIndex];
          outKeysData[keysOffset] = featureIDs_[inputIndex];
          outValuesLengthsData[keysOffset] = inLengthsData[exampleIndex];
          context_.CopyItemsSameDevice(
              inValues.dtype(),
              inLengthsData[exampleIndex],
              &inValues.template data<T>()[inValuesOffset_[inputIndex]],
              &outValuesValuesData[valuesOffset]);
          valuesOffset += inLengthsData[exampleIndex];
          inValuesOffset_[inputIndex] += inLengthsData[exampleIndex];
          ++keysOffset;
        }
      }
    }
    return true;
  }

 private:
  const int kNumTensorsPerInput = 3;
  int numInputs_;
  std::vector<int> inValuesOffset_;
  std::vector<int64_t> featureIDs_;
};
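
// A worked example for MergeSingleListFeatureTensorsOp (illustrative values):
// with feature_ids = {10, 11} and two examples,
//   input 0: lengths = [2, 0], values = [1.1, 2.2], presence = [true, false]
//   input 1: lengths = [1, 1], values = [3.3, 4.4], presence = [true, true]
// the operator produces
//   out_lengths        = [2, 1]
//   out_keys           = [10, 11, 11]
//   out_values_lengths = [2, 1, 1]
//   out_values_values  = [1.1, 2.2, 3.3, 4.4]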

// Shared gradient for the single-list and single-map merge operators: splits
// the merged values gradient (the last input) back into one values-gradient
// tensor per feature, using the per-feature (lengths, presence) input pairs
// that precede it.
template <class Context>
class MergeSingleListOrMapFeatureTensorsGradientOp : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;

  template <class... Args>
  explicit MergeSingleListOrMapFeatureTensorsGradientOp(Args&&... args)
      : Operator<Context>(std::forward<Args>(args)...) {
    numFeatureInputs_ = (InputSize() - 1) / kNumTensorsPerInput;
  }
  virtual ~MergeSingleListOrMapFeatureTensorsGradientOp() noexcept {}

  bool RunOnDevice() override {
    return DispatchHelper<
        TensorTypes<bool, int32_t, int64_t, float, double, std::string>>::
        call(this, Input(InputSize() - 1));
  }

  template <typename T>
  bool DoRunWithType() {
    int numExamples = Input(0).numel();
    std::vector<int> outValuesOffset(numFeatureInputs_);
    for (int inputIndex = 0; inputIndex < numFeatureInputs_; ++inputIndex) {
      int inputNumValues = 0;
      const int32_t* inLengthsData =
          Input(kNumTensorsPerInput * inputIndex).template data<int32_t>();
      const bool* inPresenceData =
          Input(kNumTensorsPerInput * inputIndex + 1).template data<bool>();
      for (int exampleIndex = 0; exampleIndex < numExamples; ++exampleIndex) {
        if (inPresenceData[exampleIndex]) {
          inputNumValues += inLengthsData[exampleIndex];
        }
      }
      Output(inputIndex)->Resize(inputNumValues);
    }

    const auto& inValuesValuesGrad = Input(InputSize() - 1);
    const T* inValuesValuesGradData = inValuesValuesGrad.template data<T>();

    int inValuesValuesOffset = 0;
    for (int exampleIndex = 0; exampleIndex < numExamples; ++exampleIndex) {
      for (int inputIndex = 0; inputIndex < numFeatureInputs_; ++inputIndex) {
        const int32_t* inLengthsData =
            Input(kNumTensorsPerInput * inputIndex).template data<int32_t>();
        const bool* inPresenceData =
            Input(kNumTensorsPerInput * inputIndex + 1).template data<bool>();
        if (inPresenceData[exampleIndex]) {
          T* outFeatureValues = Output(inputIndex)->template mutable_data<T>();
          context_.CopyItemsSameDevice(
              inValuesValuesGrad.dtype(),
              inLengthsData[exampleIndex],
              &inValuesValuesGradData[inValuesValuesOffset],
              &outFeatureValues[outValuesOffset[inputIndex]]);
          outValuesOffset[inputIndex] += inLengthsData[exampleIndex];
          inValuesValuesOffset += inLengthsData[exampleIndex];
        }
      }
    }
    return true;
  }

 private:
  const int kNumTensorsPerInput = 2;
  int numFeatureInputs_;
};
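
// Continuing the list example above for this gradient op: a merged values
// gradient of [d0, d1, d2, d3] splits back into [d0, d1] for the first
// feature and [d2, d3] for the second.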

// Merges multiple single-map feature tensors into the multi-feature map
// representation, following the same pattern as the operators above.
template <class Context>
class MergeSingleMapFeatureTensorsOp : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;