diff options
author | Kevin May <kevin.may@arm.com> | 2020-03-26 13:34:14 +0000 |
---|---|---|
committer | Kevin May <kevin.may@arm.com> | 2020-03-26 17:39:25 +0000 |
commit | 42477c1d3e7ddf74863e84ab79dbe6f42e4a0ba3 (patch) | |
tree | e5260f4b9e5e36080269243c1f1cd74f5589b206 /ArmnnPreparedModel.cpp | |
parent | cae7e927a5b5559f67bb87a1737f6606d5d6f328 (diff) | |
download | android-nn-driver-42477c1d3e7ddf74863e84ab79dbe6f42e4a0ba3.tar.gz |
IVGCVSW-4447 Add Hal 1_3 Support
* Add new 1.3 files HalPolicy, ArmnnDriver, ArmnnDriverImpl
* Add new .rc file for 1.3 service
* Add ArmnnPreparedModel_1_3 and implement new functions
* Update Android.mk with 1.3 driver and service
* Refactor ifdef to include ARMNN_ANDROID_NN_V1_3
* Create Utils getMainModel for new 1.3 Model Main Subgraph
* Use android Utils to convertToV1_X in ArmnnPreparedModel_1_3
* Refactor HAL 1.2 convert functions into ConversionUtils_1_2.hpp
* Replace ArmnnBurstExecutorWithCache with call to ExecutionBurstServer
Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: I514069e9e1b16bcd1c4abfb5d563d25ac22d02e3
Diffstat (limited to 'ArmnnPreparedModel.cpp')
-rw-r--r-- | ArmnnPreparedModel.cpp | 10 |
1 file changed, 8 insertions, 2 deletions
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp index d095e419..f990d3bc 100644 --- a/ArmnnPreparedModel.cpp +++ b/ArmnnPreparedModel.cpp @@ -294,7 +294,7 @@ bool ArmnnPreparedModel<HalVersion>::ExecuteWithDummyInputs() { std::vector<std::vector<char>> storage; armnn::InputTensors inputTensors; - for (unsigned int i = 0; i < m_Model.inputIndexes.size(); i++) + for (unsigned int i = 0; i < getMainModel(m_Model).inputIndexes.size(); i++) { const armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i); storage.emplace_back(inputTensorInfo.GetNumBytes()); @@ -304,7 +304,7 @@ bool ArmnnPreparedModel<HalVersion>::ExecuteWithDummyInputs() } armnn::OutputTensors outputTensors; - for (unsigned int i = 0; i < m_Model.outputIndexes.size(); i++) + for (unsigned int i = 0; i < getMainModel(m_Model).outputIndexes.size(); i++) { const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i); storage.emplace_back(outputTensorInfo.GetNumBytes()); @@ -349,4 +349,10 @@ template class ArmnnPreparedModel<hal_1_1::HalPolicy>; template class ArmnnPreparedModel<hal_1_1::HalPolicy>; template class ArmnnPreparedModel<hal_1_2::HalPolicy>; #endif + +#ifdef ARMNN_ANDROID_NN_V1_3 +template class ArmnnPreparedModel<hal_1_1::HalPolicy>; +template class ArmnnPreparedModel<hal_1_2::HalPolicy>; +template class ArmnnPreparedModel<hal_1_3::HalPolicy>; +#endif } // namespace armnn_driver |