From ef6f300928e54cbe6bbfb9dfeb26c2db56ee5dc9 Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Date: Mon, 17 Aug 2020 17:02:12 +0100
Subject: IVGCVSW-5114 Enable memory import in TfLiteYoloV3Big App

* Enable memory import in TfLiteYoloV3Big App
* Add isMemoryManaged flag to Concat and Splitter layers

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I7e00f5da2a016c09d480b744fb17ea5611af8365
---
 src/armnn/layers/ConcatLayer.cpp                      | 13 +++++++------
 src/armnn/layers/ConcatLayer.hpp                      |  2 +-
 src/armnn/layers/SplitterLayer.cpp                    | 13 +++++++------
 src/armnn/layers/SplitterLayer.hpp                    |  2 +-
 tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp | 10 ++++++----
 5 files changed, 22 insertions(+), 18 deletions(-)

diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 5b6d25256b..0118426954 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -36,12 +36,14 @@ std::unique_ptr<IWorkload> ConcatLayer::CreateWorkload(const IWorkloadFactory& f
 }
 
 template<typename FactoryType>
-void ConcatLayer::CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory)
+void ConcatLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
+                                const FactoryType& factory,
+                                bool isMemoryManaged)
 {
     //If sub tensors are supported then the concat
     //just needs to make sure that the outputs of the prev layer
     //are made subtensors of the output of the concat layer.
-    m_OutputHandlers[0].CreateTensorHandles(factory);
+    m_OutputHandlers[0].CreateTensorHandles(factory, isMemoryManaged);
 
     if (factory.SupportsSubTensors())
     {
@@ -168,21 +170,20 @@ void ConcatLayer::CreateTensors(const TensorHandleFactoryRegistry& registry, con
 
 void ConcatLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
                                       const IWorkloadFactory& workloadFactory,
-                                      const bool IsMemoryManaged)
+                                      const bool isMemoryManaged)
 {
-    IgnoreUnused(IsMemoryManaged);
     OutputSlot& slot = GetOutputSlot(0);
     ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
 
     if (factoryId == ITensorHandleFactory::LegacyFactoryId)
     {
-        CreateTensors(registry, workloadFactory);
+        CreateTensors(registry, workloadFactory, isMemoryManaged);
     }
     else
     {
         ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
         ARMNN_ASSERT(handleFactory);
-        CreateTensors(registry, *handleFactory);
+        CreateTensors(registry, *handleFactory, isMemoryManaged);
     }
 }
 
diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp
index eaa5c15a9c..3d9ba1815e 100644
--- a/src/armnn/layers/ConcatLayer.hpp
+++ b/src/armnn/layers/ConcatLayer.hpp
@@ -56,7 +56,7 @@ protected:
 
 private:
     template <typename FactoryType>
-    void CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory);
+    void CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory, bool isMemoryManaged);
 };
 
 } // namespace
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 75fc5378db..e5c9903e2f 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -33,7 +33,9 @@ std::unique_ptr<IWorkload> SplitterLayer::CreateWorkload(const IWorkloadFactory&
 }
 
 template<typename FactoryType>
-void SplitterLayer::CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory)
+void SplitterLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
+                                  const FactoryType& factory,
+                                  bool isMemoryManaged)
 {
     //If sub tensors are supported than all the "splitter" need to do is to
     //set the outputs to be appropriate sub tensors of the input.
@@ -166,28 +168,27 @@ void SplitterLayer::CreateTensors(const TensorHandleFactoryRegistry& registry, c
     {
         for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
         {
-            m_OutputHandlers[i].CreateTensorHandles(factory);
+            m_OutputHandlers[i].CreateTensorHandles(factory, isMemoryManaged);
         }
     }
 }
 
 void SplitterLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
                                         const IWorkloadFactory& workloadFactory,
-                                        const bool IsMemoryManaged)
+                                        const bool isMemoryManaged)
 {
-    IgnoreUnused(IsMemoryManaged);
     OutputSlot& slot = GetOutputSlot(0);
     ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
 
     if (factoryId == ITensorHandleFactory::LegacyFactoryId)
     {
-        CreateTensors(registry, workloadFactory);
+        CreateTensors(registry, workloadFactory, isMemoryManaged);
     }
     else
     {
         ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
         ARMNN_ASSERT(handleFactory);
-        CreateTensors(registry, *handleFactory);
+        CreateTensors(registry, *handleFactory, isMemoryManaged);
     }
 }
 
diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp
index ae725b9ad0..9999009175 100644
--- a/src/armnn/layers/SplitterLayer.hpp
+++ b/src/armnn/layers/SplitterLayer.hpp
@@ -57,7 +57,7 @@ protected:
 
 private:
     template <typename FactoryType>
-    void CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory);
+    void CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory, bool isMemoryManaged);
 };
 
 } // namespace
diff --git a/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp b/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
index bf3578c37e..fcc21771cc 100644
--- a/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
+++ b/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
@@ -101,7 +101,8 @@ int LoadModel(const char* filename,
               ITfLiteParser& parser,
               IRuntime& runtime,
               NetworkId& networkId,
-              const std::vector<BackendId>& backendPreferences)
+              const std::vector<BackendId>& backendPreferences,
+              bool enableImport = false)
 {
     std::ifstream stream(filename, std::ios::in | std::ios::binary);
     if (!stream.is_open())
@@ -125,10 +126,10 @@ int LoadModel(const char* filename,
         return OPTIMIZE_NETWORK_ERROR;
     }
 
-    // Load backbone model into runtime
+    // Load model into runtime
     {
         std::string errorMessage;
-        INetworkProperties modelProps;
+        INetworkProperties modelProps(enableImport, enableImport);
         Status status = runtime.LoadNetwork(networkId, std::move(optimizedModel), errorMessage, modelProps);
         if (status != Status::Success)
         {
@@ -346,7 +347,8 @@ int main(int argc, char* argv[])
     // Load detector model
     ARMNN_LOG(info) << "Loading detector...";
     NetworkId detectorId;
-    CHECK_OK(LoadModel(progArgs.detectorDir.c_str(), *parser, *runtime, detectorId, progArgs.prefBackendsDetector));
+    CHECK_OK(LoadModel(
+        progArgs.detectorDir.c_str(), *parser, *runtime, detectorId, progArgs.prefBackendsDetector, true));
     auto detectIn0Id = parser->GetNetworkInputBindingInfo(0, "input_to_detector_1");
     auto detectIn1Id = parser->GetNetworkInputBindingInfo(0, "input_to_detector_2");
     auto detectIn2Id = parser->GetNetworkInputBindingInfo(0, "input_to_detector_3");
-- 
cgit v1.2.1
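
Note (commentary, not part of the commit): the patch does two things. ConcatLayer and
SplitterLayer previously discarded the isMemoryManaged argument via IgnoreUnused() and
always created memory-managed output handles; the flag is now plumbed through
CreateTensors(), so when memory import is requested the handles are not backed by the
pooled memory manager and can wrap user-supplied buffers instead. Second, the YoloV3Big
app now passes enableImport into INetworkProperties when loading the detector network.

Below is a minimal caller-side sketch of the import path this enables. It assumes an
already-optimized network (optNet, e.g. the result of armnn::Optimize()); the helper
name and error handling are illustrative, not taken from the patch:

    #include <armnn/ArmNN.hpp>

    #include <stdexcept>
    #include <string>
    #include <utility>

    // Sketch: load an optimized network with memory import/export enabled,
    // mirroring the patch's INetworkProperties modelProps(enableImport, enableImport).
    armnn::NetworkId LoadWithImport(armnn::IRuntime& runtime,
                                    armnn::IOptimizedNetworkPtr optNet)
    {
        armnn::NetworkId networkId;
        std::string errorMessage;

        // Constructor arguments are (importEnabled, exportEnabled).
        armnn::INetworkProperties props(true, true);

        if (runtime.LoadNetwork(networkId, std::move(optNet), errorMessage, props)
            != armnn::Status::Success)
        {
            throw std::runtime_error(errorMessage);
        }
        return networkId;
    }

With import enabled, the runtime can read input buffers and write output buffers in
place rather than copying into its own managed storage; in return, the caller's buffers
generally have to stay alive for the duration of inference and meet the backend's
alignment requirements for imported pointers.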