aboutsummaryrefslogtreecommitdiff
path: root/ModelToINetworkConverter.hpp
diff options
context:
space:
mode:
author: telsoa01 <telmo.soares@arm.com> 2018-08-31 09:31:35 +0100
committer: telsoa01 <telmo.soares@arm.com> 2018-08-31 09:31:35 +0100
commit: ce3e84a8d449cbf31cee57e30f0eef6a96c0ce94 (patch)
tree: 77a769e27879fb712027c990071b061c5e0e3b60 /ModelToINetworkConverter.hpp
parent: deb3bdbe028a59da0759dd7a560387d03a11d322 (diff)
download: android-nn-driver-ce3e84a8d449cbf31cee57e30f0eef6a96c0ce94.tar.gz
Release 18.08
Diffstat (limited to 'ModelToINetworkConverter.hpp')
-rw-r--r--  ModelToINetworkConverter.hpp  129
1 file changed, 80 insertions, 49 deletions
diff --git a/ModelToINetworkConverter.hpp b/ModelToINetworkConverter.hpp
index 864a2fcc..f0e28970 100644
--- a/ModelToINetworkConverter.hpp
+++ b/ModelToINetworkConverter.hpp
@@ -5,12 +5,11 @@
#pragma once
-#include "HalInterfaces.h"
-#include "NeuralNetworks.h"
-#include "ActivationFunctor.h"
-
#include "ArmnnDriver.hpp"
+#include <NeuralNetworks.h>
+#include <ActivationFunctor.h>
+
#include <armnn/ArmNN.hpp>
#include <armnn/INetwork.hpp>
#include <CpuExecutor.h>
@@ -39,7 +38,8 @@ enum class ConversionResult
class ModelToINetworkConverter
{
public:
- ModelToINetworkConverter(armnn::Compute compute, const V1_0::Model& model,
+ ModelToINetworkConverter(armnn::Compute compute,
+ const ::android::hardware::neuralnetworks::V1_0::Model& model,
const std::set<unsigned int>& forcedUnsupportedOperations);
ConversionResult GetConversionResult() const { return m_ConversionResult; }
@@ -52,82 +52,108 @@ public:
private:
void Convert();
- bool ConvertOperation(const V1_0::Operation& operation);
+ bool ConvertOperation(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertAdd(const V1_0::Operation& operation);
+ bool ConvertAdd(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertAveragePool2d(const V1_0::Operation& operation);
+ bool ConvertAveragePool2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertConcatenation(const V1_0::Operation& operation);
+ bool ConvertConcatenation(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertConv2d(const V1_0::Operation& operation);
+ bool ConvertConv2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertDepthwiseConv2d(const V1_0::Operation& operation);
+ bool ConvertDepthwiseConv2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertFloor(const V1_0::Operation& operation);
+ bool ConvertFloor(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertFullyConnected(const V1_0::Operation& operation);
+ bool ConvertFullyConnected(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertLogistic(const V1_0::Operation& operation);
+ bool ConvertLogistic(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertLocalResponseNormalization(const V1_0::Operation& operation);
+ bool ConvertLocalResponseNormalization(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertL2Normalization(const V1_0::Operation& operation);
+ bool ConvertL2Normalization(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertL2Pool2d(const V1_0::Operation& operation);
+ bool ConvertL2Pool2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertMaxPool2d(const V1_0::Operation& operation);
+ bool ConvertMaxPool2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertMul(const V1_0::Operation& operation);
+ bool ConvertMul(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertReLu(const V1_0::Operation& operation);
+ bool ConvertReLu(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertReLu1(const V1_0::Operation& operation);
+ bool ConvertReLu1(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertReLu6(const V1_0::Operation& operation);
+ bool ConvertReLu6(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertSoftmax(const V1_0::Operation& operation);
+ bool ConvertSoftmax(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertTanH(const V1_0::Operation& operation);
+ bool ConvertTanH(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertReshape(const V1_0::Operation& operation);
+ bool ConvertReshape(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertResizeBilinear(const V1_0::Operation& operation);
+ bool ConvertResizeBilinear(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
- bool ConvertToActivation(const V1_0::Operation& operation, const char* operationName,
+ bool ConvertLstm(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
+
+ bool ConvertToActivation(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+ const char* operationName,
const armnn::ActivationDescriptor& activationDesc);
- bool ConvertPooling2d(const V1_0::Operation& operation, const char* name, armnn::PoolingAlgorithm poolType);
+ bool ConvertPooling2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+ const char* name, armnn::PoolingAlgorithm poolType);
const void* GetOperandValueReadOnlyAddress(const Operand& operand) const;
- const Operand* GetInputOperand(const V1_0::Operation& operation, uint32_t inputIndex) const;
+ const Operand* GetInputOperand(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+ uint32_t inputIndex) const;
- const Operand* GetOutputOperand(const V1_0::Operation& operation, uint32_t outputIndex) const;
+ const Operand* GetOutputOperand(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+ uint32_t outputIndex) const;
template<typename T>
- bool GetInputScalar(const V1_0::Operation& operation, uint32_t inputIndex, OperandType type, T& outValue) const;
+ bool GetInputScalar(const ::android::hardware::neuralnetworks::V1_0::Operation& operation, uint32_t inputIndex,
+ OperandType type, T& outValue) const;
- bool GetInputInt32(const V1_0::Operation& operation, uint32_t inputIndex, int32_t& outValue) const;
+ bool GetInputInt32(const ::android::hardware::neuralnetworks::V1_0::Operation& operation, uint32_t inputIndex,
+ int32_t& outValue) const;
- bool GetInputFloat32(const V1_0::Operation& operation, uint32_t inputIndex, float& outValue) const;
+ bool GetInputFloat32(const ::android::hardware::neuralnetworks::V1_0::Operation& operation, uint32_t inputIndex,
+ float& outValue) const;
- bool GetInputActivationFunction(const V1_0::Operation& operation, uint32_t inputIndex,
- ActivationFn& outActivationFunction) const;
+ bool GetInputActivationFunctionImpl(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+ uint32_t inputIndex,
+ OperandType type,
+ ActivationFn& outActivationFunction) const;
- bool GetInputPaddingScheme(const V1_0::Operation& operation, uint32_t inputIndex,
- android::nn::PaddingScheme& outPaddingScheme) const;
+ bool GetInputActivationFunction(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+ uint32_t inputIndex,
+ ActivationFn& outActivationFunction) const;
- LayerInputHandle ConvertToLayerInputHandle(const V1_0::Operation& operation, uint32_t inputIndex);
+ bool GetInputActivationFunctionFromTensor(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+ uint32_t inputIndex,
+ ActivationFn& outActivationFunction) const;
- ConstTensorPin ConvertOperationInputToConstTensorPin(const V1_0::Operation& operation, uint32_t inputIndex,
- const armnn::PermutationVector& dimensionMappings = g_DontPermute,
- const armnn::TensorShape* overrideTensorShape = nullptr);
+ bool GetOptionalInputActivation(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+ uint32_t inputIndex,
+ ActivationFn& activationFunction) const;
+
+ bool GetInputPaddingScheme(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+ uint32_t inputIndex,
+ android::nn::PaddingScheme& outPaddingScheme) const;
+
+ LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+ uint32_t inputIndex);
+
+ ConstTensorPin ConvertOperationInputToConstTensorPin(
+ const ::android::hardware::neuralnetworks::V1_0::Operation& operation, uint32_t inputIndex,
+ const armnn::PermutationVector& dimensionMappings = g_DontPermute,
+ const armnn::TensorShape* overrideTensorShape = nullptr, bool optional = false);
ConstTensorPin ConvertOperandToConstTensorPin(const Operand& operand,
const armnn::PermutationVector& dimensionMappings = g_DontPermute,
- const armnn::TensorShape* overrideTensorShape = nullptr);
+ const armnn::TensorShape* overrideTensorShape = nullptr, bool optional = false);
bool GetTensorInt32Values(const Operand& operand, std::vector<int32_t>& outValues) const;
@@ -135,20 +161,25 @@ private:
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo, ActivationFn activation,
armnn::IConnectableLayer* prevLayer);
+ bool SetupAndTrackLayerOutputSlot(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+ uint32_t operationOutputIndex,
+ armnn::IConnectableLayer& layer,
+ uint32_t layerOutputIndex);
- bool SetupAndTrackLayerOutputSlot(const V1_0::Operation& operation, uint32_t outputIndex,
+ bool SetupAndTrackLayerOutputSlot(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+ uint32_t outputIndex,
armnn::IConnectableLayer& layer);
// Input data
- armnn::Compute m_Compute;
- const V1_0::Model& m_Model;
- const std::set<unsigned int>& m_ForcedUnsupportedOperations;
+ armnn::Compute m_Compute;
+ const ::android::hardware::neuralnetworks::V1_0::Model& m_Model;
+ const std::set<unsigned int>& m_ForcedUnsupportedOperations;
// Output data
- armnn::INetworkPtr m_Network;
- ConversionResult m_ConversionResult;
- std::map<uint32_t, bool> m_OperationSupported;
+ armnn::INetworkPtr m_Network;
+ ConversionResult m_ConversionResult;
+ std::map<uint32_t, bool> m_OperationSupported;
// Working/intermediate data
std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;