path: root/ModelToINetworkConverter.hpp
author     surmeh01 <surabhi.mehta@arm.com>    2018-07-05 12:06:04 +0100
committer  surmeh01 <surabhi.mehta@arm.com>    2018-07-05 12:06:04 +0100
commit     deb3bdbe028a59da0759dd7a560387d03a11d322 (patch)
tree       869b7ee10d8f1f19a0861e0b552bb453330adf0a /ModelToINetworkConverter.hpp
parent     49b9e100bfbb3b8da01472a0ff48b2bd92944e01 (diff)
download   android-nn-driver-deb3bdbe028a59da0759dd7a560387d03a11d322.tar.gz
Release 18.05.02
Diffstat (limited to 'ModelToINetworkConverter.hpp')
-rw-r--r--  ModelToINetworkConverter.hpp  72
1 file changed, 37 insertions, 35 deletions
diff --git a/ModelToINetworkConverter.hpp b/ModelToINetworkConverter.hpp
index 7ced514b..864a2fcc 100644
--- a/ModelToINetworkConverter.hpp
+++ b/ModelToINetworkConverter.hpp
@@ -9,6 +9,8 @@
#include "NeuralNetworks.h"
#include "ActivationFunctor.h"
+#include "ArmnnDriver.hpp"
+
#include <armnn/ArmNN.hpp>
#include <armnn/INetwork.hpp>
#include <CpuExecutor.h>
@@ -37,7 +39,7 @@ enum class ConversionResult
class ModelToINetworkConverter
{
public:
- ModelToINetworkConverter(armnn::Compute compute, const Model& model,
+ ModelToINetworkConverter(armnn::Compute compute, const V1_0::Model& model,
const std::set<unsigned int>& forcedUnsupportedOperations);
ConversionResult GetConversionResult() const { return m_ConversionResult; }
@@ -50,76 +52,76 @@ public:
private:
void Convert();
- bool ConvertOperation(const Operation& operation);
+ bool ConvertOperation(const V1_0::Operation& operation);
- bool ConvertAdd(const Operation& operation);
+ bool ConvertAdd(const V1_0::Operation& operation);
- bool ConvertAveragePool2d(const Operation& operation);
+ bool ConvertAveragePool2d(const V1_0::Operation& operation);
- bool ConvertConcatenation(const Operation& operation);
+ bool ConvertConcatenation(const V1_0::Operation& operation);
- bool ConvertConv2d(const Operation& operation);
+ bool ConvertConv2d(const V1_0::Operation& operation);
- bool ConvertDepthwiseConv2d(const Operation& operation);
+ bool ConvertDepthwiseConv2d(const V1_0::Operation& operation);
- bool ConvertFloor(const Operation& operation);
+ bool ConvertFloor(const V1_0::Operation& operation);
- bool ConvertFullyConnected(const Operation& operation);
+ bool ConvertFullyConnected(const V1_0::Operation& operation);
- bool ConvertLogistic(const Operation& operation);
+ bool ConvertLogistic(const V1_0::Operation& operation);
- bool ConvertLocalResponseNormalization(const Operation& operation);
+ bool ConvertLocalResponseNormalization(const V1_0::Operation& operation);
- bool ConvertL2Normalization(const Operation& operation);
+ bool ConvertL2Normalization(const V1_0::Operation& operation);
- bool ConvertL2Pool2d(const Operation& operation);
+ bool ConvertL2Pool2d(const V1_0::Operation& operation);
- bool ConvertMaxPool2d(const Operation& operation);
+ bool ConvertMaxPool2d(const V1_0::Operation& operation);
- bool ConvertMul(const Operation& operation);
+ bool ConvertMul(const V1_0::Operation& operation);
- bool ConvertReLu(const Operation& operation);
+ bool ConvertReLu(const V1_0::Operation& operation);
- bool ConvertReLu1(const Operation& operation);
+ bool ConvertReLu1(const V1_0::Operation& operation);
- bool ConvertReLu6(const Operation& operation);
+ bool ConvertReLu6(const V1_0::Operation& operation);
- bool ConvertSoftmax(const Operation& operation);
+ bool ConvertSoftmax(const V1_0::Operation& operation);
- bool ConvertTanH(const Operation& operation);
+ bool ConvertTanH(const V1_0::Operation& operation);
- bool ConvertReshape(const Operation& operation);
+ bool ConvertReshape(const V1_0::Operation& operation);
- bool ConvertResizeBilinear(const Operation& operation);
+ bool ConvertResizeBilinear(const V1_0::Operation& operation);
- bool ConvertToActivation(const Operation& operation, const char* operationName,
+ bool ConvertToActivation(const V1_0::Operation& operation, const char* operationName,
const armnn::ActivationDescriptor& activationDesc);
- bool ConvertPooling2d(const Operation& operation, const char* name, armnn::PoolingAlgorithm poolType);
+ bool ConvertPooling2d(const V1_0::Operation& operation, const char* name, armnn::PoolingAlgorithm poolType);
const void* GetOperandValueReadOnlyAddress(const Operand& operand) const;
- const Operand* GetInputOperand(const Operation& operation, uint32_t inputIndex) const;
+ const Operand* GetInputOperand(const V1_0::Operation& operation, uint32_t inputIndex) const;
- const Operand* GetOutputOperand(const Operation& operation, uint32_t outputIndex) const;
+ const Operand* GetOutputOperand(const V1_0::Operation& operation, uint32_t outputIndex) const;
template<typename T>
- bool GetInputScalar(const Operation& operation, uint32_t inputIndex, OperandType type, T& outValue) const;
+ bool GetInputScalar(const V1_0::Operation& operation, uint32_t inputIndex, OperandType type, T& outValue) const;
- bool GetInputInt32(const Operation& operation, uint32_t inputIndex, int32_t& outValue) const;
+ bool GetInputInt32(const V1_0::Operation& operation, uint32_t inputIndex, int32_t& outValue) const;
- bool GetInputFloat32(const Operation& operation, uint32_t inputIndex, float& outValue) const;
+ bool GetInputFloat32(const V1_0::Operation& operation, uint32_t inputIndex, float& outValue) const;
- bool GetInputActivationFunction(const Operation& operation, uint32_t inputIndex,
+ bool GetInputActivationFunction(const V1_0::Operation& operation, uint32_t inputIndex,
ActivationFn& outActivationFunction) const;
- bool GetInputPaddingScheme(const Operation& operation, uint32_t inputIndex,
+ bool GetInputPaddingScheme(const V1_0::Operation& operation, uint32_t inputIndex,
android::nn::PaddingScheme& outPaddingScheme) const;
- LayerInputHandle ConvertToLayerInputHandle(const Operation& operation, uint32_t inputIndex);
+ LayerInputHandle ConvertToLayerInputHandle(const V1_0::Operation& operation, uint32_t inputIndex);
- ConstTensorPin ConvertOperationInputToConstTensorPin(const Operation& operation, uint32_t inputIndex,
+ ConstTensorPin ConvertOperationInputToConstTensorPin(const V1_0::Operation& operation, uint32_t inputIndex,
const armnn::PermutationVector& dimensionMappings = g_DontPermute,
const armnn::TensorShape* overrideTensorShape = nullptr);
@@ -134,13 +136,13 @@ private:
armnn::IConnectableLayer* prevLayer);
- bool SetupAndTrackLayerOutputSlot(const Operation& operation, uint32_t outputIndex,
+ bool SetupAndTrackLayerOutputSlot(const V1_0::Operation& operation, uint32_t outputIndex,
armnn::IConnectableLayer& layer);
// Input data
armnn::Compute m_Compute;
- const Model& m_Model;
+ const V1_0::Model& m_Model;
const std::set<unsigned int>& m_ForcedUnsupportedOperations;
// Output data
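
The diff above mechanically qualifies the NN HAL types Model and Operation with the V1_0 namespace and adds an include of ArmnnDriver.hpp. As a minimal sketch of how such qualified names can resolve, assuming ArmnnDriver.hpp (or a header it includes) provides the alias shown below; the InspectModel helper is purely illustrative and not part of this commit:

#include <android/hardware/neuralnetworks/1.0/types.h>

namespace armnn_driver
{

// Assumed alias: lets V1_0::Model and V1_0::Operation name the NN HAL 1.0 structs.
namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;

// Hypothetical helper mirroring the converter's signatures, e.g.
// ModelToINetworkConverter(armnn::Compute, const V1_0::Model&, ...).
inline void InspectModel(const V1_0::Model& model)
{
    for (const V1_0::Operation& operation : model.operations)
    {
        // Each V1_0::Operation carries its OperationType plus the indices of
        // its input and output operands within model.operands.
        (void)operation;
    }
}

} // namespace armnn_driver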