From a15dc11fd7bf3ad49e752ec75157b731287fe46d Mon Sep 17 00:00:00 2001
From: arovir01
Date: Mon, 3 Sep 2018 17:12:56 +0100
Subject: IVGCVSW-1799: Add converter method for DIV to ModelToINetworkConverter

Change-Id: I64fcdb8ac2fd6f9ca0d2811ed2f98008c6396c15
---
 ModelToINetworkConverter.cpp | 162 ++++++++++++++++++++++++++++++-------------
 ModelToINetworkConverter.hpp |  77 +++++++++++---------
 2 files changed, 158 insertions(+), 81 deletions(-)

diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp
index 4a7b4014..461a8cdb 100644
--- a/ModelToINetworkConverter.cpp
+++ b/ModelToINetworkConverter.cpp
@@ -483,8 +483,8 @@ ModelToINetworkConverter<HalVersion>::ModelToINetworkConverter(armnn::Compute co
 template<typename HalVersion>
 void ModelToINetworkConverter<HalVersion>::Convert()
 {
-    using Model = typename HalVersion::Model;
-    ALOGV("ModelToINetworkConverter::Convert(): %s", GetModelSummary<Model>(m_Model).c_str());
+    using HalModel = typename HalVersion::Model;
+    ALOGV("ModelToINetworkConverter::Convert(): %s", GetModelSummary<HalModel>(m_Model).c_str());
 
     // map the memory pool into shared pointers
     m_MemPools.clear();
@@ -665,13 +665,66 @@ bool ModelToINetworkConverter<HalVersion>::ConvertOperation(const neuralnetworks
 {
     switch (operation.type)
     {
-        // TODO: provide cases for converting the new v1.1 operations
+        case neuralnetworks::V1_1::OperationType::DIV:
+            return ConvertDiv(operation);
         default:
-            return Fail("%s: Operation type %s not supported in ArmnnDriver",
-                __func__, toString(operation.type).c_str());
+            return Fail("%s: Operation type %s not supported in ArmnnDriver",
+                        __func__, toString(operation.type).c_str());
     }
 }
+
+template<typename HalVersion>
+bool ModelToINetworkConverter<HalVersion>::ConvertDiv(const neuralnetworks::V1_1::Operation& operation)
+{
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0);
+    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always the input index 2
+    // and it should be optional
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation(operation, 2, activationFunction))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0);
+    if (!outputOperand)
+    {
+        return false;
+    }
+
+    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
+
+    if (!IsLayerSupported(__func__,
+                          armnn::IsDivisionSupported,
+                          m_Compute,
+                          input0.GetTensorInfo(),
+                          input1.GetTensorInfo(),
+                          outInfo))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = m_Network->AddDivisionLayer();
+    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer);
+
+    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
+    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
+
+    if (endLayer)
+    {
+        BroadcastTensor(input0, input1, startLayer, *m_Network);
+        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer);
+    }
+
+    return Fail("%s: ProcessActivation failed", __func__);
+}
 #endif
 
 template<typename HalVersion>
@@ -2172,8 +2225,9 @@ const void* ModelToINetworkConverter<HalVersion>::GetOperandValueReadOnlyAddress
 }
 
 template<typename HalVersion>
-const Operand* ModelToINetworkConverter<HalVersion>::GetInputOperand(const neuralnetworks::V1_0::Operation& operation,
-                                                                     uint32_t inputIndex) const
+template<typename HalOperation>
+const Operand* ModelToINetworkConverter<HalVersion>::GetInputOperand(const HalOperation& operation,
+                                                                     uint32_t inputIndex) const
 {
     if (inputIndex >= operation.inputs.size())
     {
@@ -2186,8 +2240,9 @@ const Operand* ModelToINetworkConverter<HalVersion>::GetInputOperand(const neura
 }
 
 template<typename HalVersion>
-const Operand* ModelToINetworkConverter<HalVersion>::GetOutputOperand(const neuralnetworks::V1_0::Operation& operation,
-                                                                      uint32_t outputIndex) const
+template<typename HalOperation>
+const Operand* ModelToINetworkConverter<HalVersion>::GetOutputOperand(const HalOperation& operation,
+                                                                      uint32_t outputIndex) const
 {
     if (outputIndex >= operation.outputs.size())
     {
@@ -2200,10 +2255,11 @@ const Operand* ModelToINetworkConverter<HalVersion>::GetOutputOperand(const neur
 }
 
 template<typename HalVersion>
-template<typename T>
-bool ModelToINetworkConverter<HalVersion>::GetInputScalar(const neuralnetworks::V1_0::Operation& operation,
-                                                          uint32_t inputIndex,
-                                                          OperandType type, T& outValue) const
+template<typename HalOperation, typename T>
+bool ModelToINetworkConverter<HalVersion>::GetInputScalar(const HalOperation& operation,
+                                                          uint32_t inputIndex,
+                                                          OperandType type,
+                                                          T& outValue) const
 {
     const Operand* operand = GetInputOperand(operation, inputIndex);
     if (!operand)
@@ -2234,25 +2290,29 @@ bool ModelToINetworkConverter<HalVersion>::GetInputScalar(const neuralnetworks::
 }
 
 template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::GetInputInt32(const neuralnetworks::V1_0::Operation& operation,
-                                                         uint32_t inputIndex, int32_t& outValue) const
+template<typename HalOperation>
+bool ModelToINetworkConverter<HalVersion>::GetInputInt32(const HalOperation& operation,
+                                                         uint32_t inputIndex,
+                                                         int32_t& outValue) const
 {
     return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue);
 }
 
 template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::GetInputFloat32(const neuralnetworks::V1_0::Operation& operation,
-                                                           uint32_t inputIndex, float& outValue) const
+template<typename HalOperation>
+bool ModelToINetworkConverter<HalVersion>::GetInputFloat32(const HalOperation& operation,
+                                                           uint32_t inputIndex,
+                                                           float& outValue) const
 {
     return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue);
 }
 
 template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::GetInputActivationFunctionImpl(
-    const neuralnetworks::V1_0::Operation& operation,
-    uint32_t inputIndex,
-    OperandType type,
-    ActivationFn& outActivationFunction) const
+template<typename HalOperation>
+bool ModelToINetworkConverter<HalVersion>::GetInputActivationFunctionImpl(const HalOperation& operation,
+                                                                          uint32_t inputIndex,
+                                                                          OperandType type,
+                                                                          ActivationFn& outActivationFunction) const
 {
     if (type != OperandType::INT32 && type != OperandType::TENSOR_INT32)
     {
@@ -2273,17 +2333,18 @@ bool ModelToINetworkConverter<HalVersion>::GetInputActivationFunctionImpl(
 }
 
 template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::GetInputActivationFunction(
-    const neuralnetworks::V1_0::Operation& operation,
-    uint32_t inputIndex,
-    ActivationFn& outActivationFunction) const
+template<typename HalOperation>
+bool ModelToINetworkConverter<HalVersion>::GetInputActivationFunction(const HalOperation& operation,
+                                                                      uint32_t inputIndex,
+                                                                      ActivationFn& outActivationFunction) const
 {
     return GetInputActivationFunctionImpl(operation, inputIndex, OperandType::INT32, outActivationFunction);
 }
 
 template<typename HalVersion>
+template<typename HalOperation>
 bool ModelToINetworkConverter<HalVersion>::GetInputActivationFunctionFromTensor(
-    const neuralnetworks::V1_0::Operation& operation,
+    const HalOperation& operation,
     uint32_t inputIndex,
     ActivationFn& outActivationFunction) const
 {
@@ -2292,9 +2353,10 @@ bool ModelToINetworkConverter<HalVersion>::GetInputActivationFunctionFromTensor(
 }
 
 template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::GetOptionalInputActivation(const neuralnetworks::V1_0::Operation& operation,
-                                                                      uint32_t inputIndex,
-                                                                      ActivationFn& activationFunction) const
+template<typename HalOperation>
+bool ModelToINetworkConverter<HalVersion>::GetOptionalInputActivation(const HalOperation& operation,
+                                                                      uint32_t inputIndex,
+                                                                      ActivationFn& activationFunction) const
 {
     if (operation.inputs.size() <= inputIndex)
     {
@@ -2311,9 +2373,10 @@ bool ModelToINetworkConverter<HalVersion>::GetOptionalInputActivation(const neur
 }
 
 template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::GetInputPaddingScheme(const neuralnetworks::V1_0::Operation& operation,
-                                                                 uint32_t inputIndex,
-                                                                 android::nn::PaddingScheme& outPaddingScheme) const
+template<typename HalOperation>
+bool ModelToINetworkConverter<HalVersion>::GetInputPaddingScheme(const HalOperation& operation,
+                                                                 uint32_t inputIndex,
+                                                                 PaddingScheme& outPaddingScheme) const
 {
     int32_t paddingSchemeAsInt;
     if (!GetInputInt32(operation, inputIndex, paddingSchemeAsInt))
@@ -2326,9 +2389,9 @@ bool ModelToINetworkConverter<HalVersion>::GetInputPaddingScheme(const neuralnet
 }
 
 template<typename HalVersion>
-LayerInputHandle ModelToINetworkConverter<HalVersion>::ConvertToLayerInputHandle(
-    const neuralnetworks::V1_0::Operation& operation,
-    uint32_t inputIndex)
+template<typename HalOperation>
+LayerInputHandle ModelToINetworkConverter<HalVersion>::ConvertToLayerInputHandle(const HalOperation& operation,
+                                                                                 uint32_t inputIndex)
 {
     const Operand* operand = GetInputOperand(operation, inputIndex);
     if (!operand)
@@ -2397,10 +2460,13 @@ LayerInputHandle ModelToINetworkConverter<HalVersion>::ConvertToLayerInputHandle
 }
 
 template<typename HalVersion>
+template<typename HalOperation>
 ConstTensorPin ModelToINetworkConverter<HalVersion>::ConvertOperationInputToConstTensorPin(
-    const neuralnetworks::V1_0::Operation& operation,
-    uint32_t inputIndex, const armnn::PermutationVector& dimensionMappings,
-    const armnn::TensorShape* overrideTensorShape, bool optional)
+    const HalOperation& operation,
+    uint32_t inputIndex,
+    const armnn::PermutationVector& dimensionMappings,
+    const armnn::TensorShape* overrideTensorShape,
+    bool optional)
 {
     const Operand* operand = GetInputOperand(operation, inputIndex);
     if (!operand)
@@ -2550,11 +2616,11 @@ armnn::IConnectableLayer* ModelToINetworkConverter<HalVersion>::ProcessActivatio
 }
 
 template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::SetupAndTrackLayerOutputSlot(
-    const neuralnetworks::V1_0::Operation& operation,
-    uint32_t operationOutputIndex,
-    armnn::IConnectableLayer& layer,
-    uint32_t layerOutputIndex)
+template<typename HalOperation>
+bool ModelToINetworkConverter<HalVersion>::SetupAndTrackLayerOutputSlot(const HalOperation& operation,
+                                                                        uint32_t operationOutputIndex,
+                                                                        armnn::IConnectableLayer& layer,
+                                                                        uint32_t layerOutputIndex)
 {
     const Operand* outputOperand = GetOutputOperand(operation, operationOutputIndex);
 
@@ -2574,10 +2640,10 @@ bool ModelToINetworkConverter<HalVersion>::SetupAndTrackLayerOutputSlot(
 }
 
 template<typename HalVersion>
-bool ModelToINetworkConverter<HalVersion>::SetupAndTrackLayerOutputSlot(
-    const neuralnetworks::V1_0::Operation& operation,
-    uint32_t outputIndex,
-    armnn::IConnectableLayer& layer)
+template<typename HalOperation>
+bool ModelToINetworkConverter<HalVersion>::SetupAndTrackLayerOutputSlot(const HalOperation& operation,
+                                                                        uint32_t outputIndex,
+                                                                        armnn::IConnectableLayer& layer)
 {
     return SetupAndTrackLayerOutputSlot(operation, outputIndex, layer, outputIndex);
 }
diff --git a/ModelToINetworkConverter.hpp b/ModelToINetworkConverter.hpp
index 6fdcf6bd..040bec6b 100644
--- a/ModelToINetworkConverter.hpp
+++ b/ModelToINetworkConverter.hpp
@@ -47,7 +47,7 @@ struct HalVersion_1_1
 
 // A helper performing the conversion from an AndroidNN driver Model representation,
 // to an armnn::INetwork object
-template <typename HalVersion>
+template<typename HalVersion>
 class ModelToINetworkConverter
 {
 public:
@@ -69,6 +69,8 @@ private:
 
 #if defined(ARMNN_ANDROID_NN_V1_1)
     bool ConvertOperation(const ::android::hardware::neuralnetworks::V1_1::Operation& operation);
+
+    bool ConvertDiv(const ::android::hardware::neuralnetworks::V1_1::Operation& operation);
 #endif
 
     bool ConvertOperation(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
@@ -117,79 +119,88 @@ private:
 
     bool ConvertToActivation(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
                              const char* operationName,
-                                const armnn::ActivationDescriptor& activationDesc);
+                             const armnn::ActivationDescriptor& activationDesc);
 
     bool ConvertPooling2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
                           const char* name, armnn::PoolingAlgorithm poolType);
-
     const void* GetOperandValueReadOnlyAddress(const Operand& operand) const;
 
-    const Operand* GetInputOperand(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
-                                   uint32_t inputIndex) const;
+    template<typename HalOperation>
+    const Operand* GetInputOperand(const HalOperation& operation, uint32_t inputIndex) const;
 
-    const Operand* GetOutputOperand(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
-                                    uint32_t outputIndex) const;
+    template<typename HalOperation>
+    const Operand* GetOutputOperand(const HalOperation& operation, uint32_t outputIndex) const;
 
-    template<typename T>
-    bool GetInputScalar(const ::android::hardware::neuralnetworks::V1_0::Operation& operation, uint32_t inputIndex,
-                        OperandType type, T& outValue) const;
+    template<typename HalOperation, typename T>
+    bool GetInputScalar(const HalOperation& operation, uint32_t inputIndex, OperandType type, T& outValue) const;
 
-    bool GetInputInt32(const ::android::hardware::neuralnetworks::V1_0::Operation& operation, uint32_t inputIndex,
-                       int32_t& outValue) const;
+    template<typename HalOperation>
+    bool GetInputInt32(const HalOperation& operation, uint32_t inputIndex, int32_t& outValue) const;
 
-    bool GetInputFloat32(const ::android::hardware::neuralnetworks::V1_0::Operation& operation, uint32_t inputIndex,
-                         float& outValue) const;
+    template<typename HalOperation>
+    bool GetInputFloat32(const HalOperation& operation, uint32_t inputIndex, float& outValue) const;
 
-    bool GetInputActivationFunctionImpl(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+    template<typename HalOperation>
+    bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                         uint32_t inputIndex,
                                         OperandType type,
                                         ActivationFn& outActivationFunction) const;
 
-    bool GetInputActivationFunction(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+    template<typename HalOperation>
+    bool GetInputActivationFunction(const HalOperation& operation,
                                     uint32_t inputIndex,
                                     ActivationFn& outActivationFunction) const;
 
-    bool GetInputActivationFunctionFromTensor(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+    template<typename HalOperation>
+    bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                               uint32_t inputIndex,
                                               ActivationFn& outActivationFunction) const;
 
-    bool GetOptionalInputActivation(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+    template<typename HalOperation>
+    bool GetOptionalInputActivation(const HalOperation& operation,
                                     uint32_t inputIndex,
                                     ActivationFn& activationFunction) const;
 
-    bool GetInputPaddingScheme(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+    template<typename HalOperation>
+    bool GetInputPaddingScheme(const HalOperation& operation,
                                uint32_t inputIndex,
                                android::nn::PaddingScheme& outPaddingScheme) const;
 
-    LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
-                                               uint32_t inputIndex);
+    template<typename HalOperation>
+    LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation, uint32_t inputIndex);
 
+    template<typename HalOperation>
     ConstTensorPin ConvertOperationInputToConstTensorPin(
-        const ::android::hardware::neuralnetworks::V1_0::Operation& operation, uint32_t inputIndex,
-        const armnn::PermutationVector& dimensionMappings = g_DontPermute,
-        const armnn::TensorShape* overrideTensorShape = nullptr, bool optional = false);
+        const HalOperation& operation,
+        uint32_t inputIndex,
+        const armnn::PermutationVector& dimensionMappings = g_DontPermute,
+        const armnn::TensorShape* overrideTensorShape = nullptr,
+        bool optional = false);
 
-    ConstTensorPin ConvertOperandToConstTensorPin(const Operand& operand,
+    ConstTensorPin ConvertOperandToConstTensorPin(
+        const Operand& operand,
         const armnn::PermutationVector& dimensionMappings = g_DontPermute,
-        const armnn::TensorShape* overrideTensorShape = nullptr, bool optional = false);
+        const armnn::TensorShape* overrideTensorShape = nullptr,
+        bool optional = false);
 
     bool GetTensorInt32Values(const Operand& operand, std::vector<int32_t>& outValues) const;
-
-    armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo, ActivationFn activation,
+    armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
+                                                ActivationFn activation,
                                                 armnn::IConnectableLayer* prevLayer);
 
-    bool SetupAndTrackLayerOutputSlot(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+    template<typename HalOperation>
+    bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                       uint32_t operationOutputIndex,
                                       armnn::IConnectableLayer& layer,
                                       uint32_t layerOutputIndex);
 
-    bool SetupAndTrackLayerOutputSlot(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
+    template<typename HalOperation>
+    bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                       uint32_t outputIndex,
                                       armnn::IConnectableLayer& layer);
-
 
     // Input data
     armnn::Compute m_Compute;
     const HalModel& m_Model;
@@ -201,8 +212,8 @@ private:
     std::map<uint32_t, bool> m_OperationSupported;
 
     // Working/intermediate data
-    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
+    std::vector<armnn::IOutputSlot*>           m_OutputSlotForOperand;
     std::vector<android::nn::RunTimePoolInfo> m_MemPools;
 };
 
-} // armnn_driver
+} // armnn_driver
\ No newline at end of file
-- 
cgit v1.2.1
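
Note (editorial, not part of the patch): for readers unfamiliar with the converter, the sketch below illustrates the armnn::INetwork subgraph that the new ConvertDiv effectively assembles for a DIV operation with a fused RELU activation: a division layer (the "startLayer") feeding the activation layer that ProcessActivation appends (the "endLayer"). It uses only ArmNN's public INetwork API; the function name, tensor shape, and binding ids are illustrative, and the BroadcastTensor reshaping path for mismatched input ranks is omitted.

// Standalone sketch, assuming ArmNN's public C++ API of this period.
#include <armnn/ArmNN.hpp>

armnn::INetworkPtr BuildDivWithFusedRelu()
{
    using namespace armnn;

    INetworkPtr net = INetwork::Create();

    // Hypothetical 4D float tensor shape for both inputs and the output
    const unsigned int dims[] = { 1, 2, 2, 2 };
    TensorInfo info(4, dims, DataType::Float32);

    IConnectableLayer* input0 = net->AddInputLayer(0);
    IConnectableLayer* input1 = net->AddInputLayer(1);

    // Corresponds to "startLayer" in ConvertDiv
    IConnectableLayer* div = net->AddDivisionLayer();

    // Corresponds to the "endLayer" ProcessActivation appends for RELU
    ActivationDescriptor reluDesc;
    reluDesc.m_Function = ActivationFunction::ReLu;
    IConnectableLayer* relu = net->AddActivationLayer(reluDesc);

    IConnectableLayer* output = net->AddOutputLayer(0);

    // input0 / input1 -> division -> fused activation -> output
    input0->GetOutputSlot(0).Connect(div->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(div->GetInputSlot(1));
    div->GetOutputSlot(0).Connect(relu->GetInputSlot(0));
    relu->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input0->GetOutputSlot(0).SetTensorInfo(info);
    input1->GetOutputSlot(0).SetTensorInfo(info);
    div->GetOutputSlot(0).SetTensorInfo(info);
    relu->GetOutputSlot(0).SetTensorInfo(info);

    return net;
}

In the driver itself, SetupAndTrackLayerOutputSlot plays the role of the output wiring above, recording endLayer's output slot in m_OutputSlotForOperand rather than attaching an explicit output layer.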