aboutsummaryrefslogtreecommitdiff
path: root/ModelToINetworkConverter.hpp
diff options
context:
space:
mode:
authorarovir01 <Aron.Virginas-Tar@arm.com>2018-09-05 17:03:25 +0100
committerMatthew Bentham <matthew.bentham@arm.com>2018-09-18 12:40:40 +0100
commitb0717b5241a15e3e4d37a1b51b6e5fd9a92a664f (patch)
tree84159d2eb142f12081c494483c07012e8ebee8cb /ModelToINetworkConverter.hpp
parent93e48980920ddcc8c6390fa6cbfdfc9740786617 (diff)
downloadandroid-nn-driver-b0717b5241a15e3e4d37a1b51b6e5fd9a92a664f.tar.gz
IVGCVSW-1806: Refactor Android-NN-Driver ModelToINetworkConverter
* Moved conversion logic into new V1_0 and V1_1 HalPolicy classes
* Extracted common helper functions into ConversionUtils class

Change-Id: I1ab50edc266dd528c0cb22a5cd1aa65e103674d9
Diffstat (limited to 'ModelToINetworkConverter.hpp')
-rw-r--r--ModelToINetworkConverter.hpp163
1 file changed, 7 insertions, 156 deletions
diff --git a/ModelToINetworkConverter.hpp b/ModelToINetworkConverter.hpp
index 5cdfeb59..a3758fd5 100644
--- a/ModelToINetworkConverter.hpp
+++ b/ModelToINetworkConverter.hpp
@@ -6,27 +6,15 @@
#pragma once
#include "ArmnnDriver.hpp"
-#include "ArmnnDriverImpl.hpp"
-
-#include <NeuralNetworks.h>
-#include <ActivationFunctor.h>
+#include "ConversionUtils.hpp"
#include <armnn/ArmNN.hpp>
-#include <armnn/INetwork.hpp>
-#include <CpuExecutor.h>
-
-#include "Utils.hpp"
-#include <memory>
-#include <vector>
#include <set>
namespace armnn_driver
{
-class ConstTensorPin;
-class LayerInputHandle;
-
enum class ConversionResult
{
Success,
@@ -34,13 +22,13 @@ enum class ConversionResult
UnsupportedFeature
};
-// A helper performing the conversion from an AndroidNN driver Model representation,
+// A helper template class performing the conversion from an AndroidNN driver Model representation,
// to an armnn::INetwork object
-template<typename HalVersion>
+template<typename HalPolicy>
class ModelToINetworkConverter
{
public:
- using HalModel = typename HalVersion::Model;
+ using HalModel = typename HalPolicy::Model;
ModelToINetworkConverter(armnn::Compute compute,
const HalModel& model,
@@ -49,160 +37,23 @@ public:
ConversionResult GetConversionResult() const { return m_ConversionResult; }
// Returns the ArmNN INetwork corresponding to the input model, if preparation went smoothly, nullptr otherwise.
- armnn::INetwork* GetINetwork() const { return m_Network.get(); }
+ armnn::INetwork* GetINetwork() const { return m_Data.m_Network.get(); }
bool IsOperationSupported(uint32_t operationIndex) const;
private:
void Convert();
-#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
- bool ConvertOperation(const ::android::hardware::neuralnetworks::V1_1::Operation& operation);
-
- bool ConvertDiv(const ::android::hardware::neuralnetworks::V1_1::Operation& operation);
-#endif
-
- bool ConvertOperation(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertAdd(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertAveragePool2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertConcatenation(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertConv2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertDepthwiseConv2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertFloor(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertFullyConnected(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertLogistic(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertLocalResponseNormalization(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertL2Normalization(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertL2Pool2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertMaxPool2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertMul(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertReLu(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertReLu1(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertReLu6(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertSoftmax(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertTanH(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertReshape(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertResizeBilinear(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertLstm(const ::android::hardware::neuralnetworks::V1_0::Operation& operation);
-
- bool ConvertToActivation(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
- const char* operationName,
- const armnn::ActivationDescriptor& activationDesc);
-
- bool ConvertPooling2d(const ::android::hardware::neuralnetworks::V1_0::Operation& operation,
- const char* name, armnn::PoolingAlgorithm poolType);
-
- const void* GetOperandValueReadOnlyAddress(const Operand& operand) const;
-
- template<typename HalOperation>
- const Operand* GetInputOperand(const HalOperation& operation, uint32_t inputIndex) const;
-
- template<typename HalOperation>
- const Operand* GetOutputOperand(const HalOperation& operation, uint32_t outputIndex) const;
-
- template<typename HalOperation, typename T>
- bool GetInputScalar(const HalOperation& operation, uint32_t inputIndex, OperandType type, T& outValue) const;
-
- template<typename HalOperation>
- bool GetInputInt32(const HalOperation& operation, uint32_t inputIndex, int32_t& outValue) const;
-
- template<typename HalOperation>
- bool GetInputFloat32(const HalOperation& operation, uint32_t inputIndex, float& outValue) const;
-
- template<typename HalOperation>
- bool GetInputActivationFunctionImpl(const HalOperation& operation,
- uint32_t inputIndex,
- OperandType type,
- ActivationFn& outActivationFunction) const;
-
- template<typename HalOperation>
- bool GetInputActivationFunction(const HalOperation& operation,
- uint32_t inputIndex,
- ActivationFn& outActivationFunction) const;
-
- template<typename HalOperation>
- bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
- uint32_t inputIndex,
- ActivationFn& outActivationFunction) const;
-
- template<typename HalOperation>
- bool GetOptionalInputActivation(const HalOperation& operation,
- uint32_t inputIndex,
- ActivationFn& activationFunction) const;
-
- template<typename HalOperation>
- bool GetInputPaddingScheme(const HalOperation& operation,
- uint32_t inputIndex,
- android::nn::PaddingScheme& outPaddingScheme) const;
-
- template<typename HalOperation>
- LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation, uint32_t inputIndex);
-
- template<typename HalOperation>
- ConstTensorPin ConvertOperationInputToConstTensorPin(
- const HalOperation& operation,
- uint32_t inputIndex,
- const armnn::PermutationVector& dimensionMappings = g_DontPermute,
- const armnn::TensorShape* overrideTensorShape = nullptr,
- bool optional = false);
-
- ConstTensorPin ConvertOperandToConstTensorPin(
- const Operand& operand,
- const armnn::PermutationVector& dimensionMappings = g_DontPermute,
- const armnn::TensorShape* overrideTensorShape = nullptr,
- bool optional = false);
-
- bool GetTensorInt32Values(const Operand& operand, std::vector<int32_t>& outValues) const;
-
- armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
- ActivationFn activation,
- armnn::IConnectableLayer* prevLayer);
-
- template<typename HalOperation>
- bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
- uint32_t operationOutputIndex,
- armnn::IConnectableLayer& layer,
- uint32_t layerOutputIndex);
-
- template<typename HalOperation>
- bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
- uint32_t outputIndex,
- armnn::IConnectableLayer& layer);
+ // Shared aggregate input/output/internal data
+ ConversionData m_Data;
// Input data
- armnn::Compute m_Compute;
const HalModel& m_Model;
const std::set<unsigned int>& m_ForcedUnsupportedOperations;
// Output data
- armnn::INetworkPtr m_Network;
ConversionResult m_ConversionResult;
std::map<uint32_t, bool> m_OperationSupported;
-
- // Working/intermediate data
- std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
- std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
} // armnn_driver