diff options
author | Kevin May <kevin.may@arm.com> | 2020-03-26 13:34:14 +0000 |
---|---|---|
committer | Kevin May <kevin.may@arm.com> | 2020-03-26 17:39:25 +0000 |
commit | 42477c1d3e7ddf74863e84ab79dbe6f42e4a0ba3 (patch) | |
tree | e5260f4b9e5e36080269243c1f1cd74f5589b206 /ModelToINetworkConverter.cpp | |
parent | cae7e927a5b5559f67bb87a1737f6606d5d6f328 (diff) | |
download | android-nn-driver-42477c1d3e7ddf74863e84ab79dbe6f42e4a0ba3.tar.gz |
IVGCVSW-4447 Add Hal 1_3 Support
* Add new 1.3 files HalPolicy, ArmnnDriver, ArmnnDriverImpl
* Add new .rc file for 1.3 service
* Add ArmnnPreparedModel_1_3 and implement new functions
* Update Android.mk with 1.3 driver and service
* Refactor ifdef to include ARMNN_ANDROID_NN_V1_3
* Create Utils getMainModel for new 1.3 Model Main Subgraph
* Use android Utils to convertToV1_X in ArmnnPreparedModel_1_3
* Refactor HAL 1.2 convert functions into ConversionUtils_1_2.hpp
* Replace ArmnnBurstExecutorWithCache with call to ExecutionBurstServer
Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: I514069e9e1b16bcd1c4abfb5d563d25ac22d02e3
Diffstat (limited to 'ModelToINetworkConverter.cpp')
-rw-r--r-- | ModelToINetworkConverter.cpp | 36 |
1 file changed, 26 insertions, 10 deletions
diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp index 05e60462..24fb4903 100644 --- a/ModelToINetworkConverter.cpp +++ b/ModelToINetworkConverter.cpp @@ -6,8 +6,10 @@ #define LOG_TAG "ArmnnDriver" #include "ModelToINetworkConverter.hpp" +#include "Utils.hpp" #include <log/log.h> +#include <type_traits> namespace armnn_driver { @@ -62,21 +64,29 @@ void ModelToINetworkConverter<HalPolicy>::Convert() // add operations to it // track which layer outputs each operand - m_Data.m_OutputSlotForOperand = std::vector<armnn::IOutputSlot*>(m_Model.operands.size(), nullptr); - + ALOGV("ModelToINetworkConverter::Convert(): m_OutputSlotForOperand"); + m_Data.m_OutputSlotForOperand = std::vector<armnn::IOutputSlot*>(getMainModel(m_Model).operands.size(), nullptr); try { - for (uint32_t i = 0; i < m_Model.inputIndexes.size(); i++) + ALOGV("ModelToINetworkConverter::Convert(): for getMainModel(m_Model).inputIndexes.size()"); + for (uint32_t i = 0; i < getMainModel(m_Model).inputIndexes.size(); i++) { + ALOGV("ModelToINetworkConverter::Convert(): getMainModel(m_Model).inputIndexes[i]"); // inputs in android nn are represented by operands - uint32_t inputIndex = m_Model.inputIndexes[i]; - const HalOperand& operand = m_Model.operands[inputIndex]; + uint32_t inputIndex = getMainModel(m_Model).inputIndexes[i]; + ALOGV("ModelToINetworkConverter::Convert(): getMainModel(m_Model).operands[inputIndex];"); + const HalOperand& operand = getMainModel(m_Model).operands[inputIndex]; + ALOGV("ModelToINetworkConverter::Convert(): GetTensorInfoForOperand(operand)"); const armnn::TensorInfo& tensor = GetTensorInfoForOperand(operand); + ALOGV("ModelToINetworkConverter::Convert(): m_Data.m_Network->AddInputLayer(i)"); armnn::IConnectableLayer* layer = m_Data.m_Network->AddInputLayer(i); + ALOGV("ModelToINetworkConverter::Convert(): layer->GetOutputSlot(0)"); armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0); + ALOGV("ModelToINetworkConverter::Convert(): 
outputSlot.SetTensorInfo(GetTensorInfoForOperand(operand))"); outputSlot.SetTensorInfo(GetTensorInfoForOperand(operand)); + ALOGV("ModelToINetworkConverter::Convert(): m_Data.m_OutputSlotForOperand[inputIndex] = &outputSlot"); // store for later layers m_Data.m_OutputSlotForOperand[inputIndex] = &outputSlot; } @@ -92,9 +102,9 @@ void ModelToINetworkConverter<HalPolicy>::Convert() m_ConversionResult = ConversionResult::UnsupportedFeature; } - for (uint32_t operationIdx = 0; operationIdx < m_Model.operations.size(); operationIdx++) + for (uint32_t operationIdx = 0; operationIdx < getMainModel(m_Model).operations.size(); operationIdx++) { - const auto& operation = m_Model.operations[operationIdx]; + const auto& operation = getMainModel(m_Model).operations[operationIdx]; bool ok = true; if (m_ForcedUnsupportedOperations.find(operationIdx) != m_ForcedUnsupportedOperations.end()) @@ -135,11 +145,11 @@ void ModelToINetworkConverter<HalPolicy>::Convert() { if (m_ConversionResult == ConversionResult::Success) { - for (uint32_t i = 0; i < m_Model.outputIndexes.size(); i++) + for (uint32_t i = 0; i < getMainModel(m_Model).outputIndexes.size(); i++) { // outputs in android nn are represented by operands - uint32_t outputIndex = m_Model.outputIndexes[i]; - const HalOperand& operand = m_Model.operands[outputIndex]; + uint32_t outputIndex = getMainModel(m_Model).outputIndexes[i]; + const HalOperand& operand = getMainModel(m_Model).operands[outputIndex]; const armnn::TensorInfo& tensor = GetTensorInfoForOperand(operand); armnn::IConnectableLayer* layer = m_Data.m_Network->AddOutputLayer(i); @@ -178,4 +188,10 @@ template class ModelToINetworkConverter<hal_1_1::HalPolicy>; template class ModelToINetworkConverter<hal_1_2::HalPolicy>; #endif +#ifdef ARMNN_ANDROID_NN_V1_3 +template class ModelToINetworkConverter<hal_1_1::HalPolicy>; +template class ModelToINetworkConverter<hal_1_2::HalPolicy>; +template class ModelToINetworkConverter<hal_1_3::HalPolicy>; +#endif + } // armnn_driver |