author    Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2020-08-17 17:02:12 +0100
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2020-08-17 18:46:28 +0000
commit    ef6f300928e54cbe6bbfb9dfeb26c2db56ee5dc9 (patch)
tree      7f9be0980ff5cede5ff8b25fced44f3c5cc0bd70
parent    35c31c04a9805082be32474211dcd62b7629ed2b (diff)
download  armnn-ef6f300928e54cbe6bbfb9dfeb26c2db56ee5dc9.tar.gz
IVGCVSW-5114 Enable memory import in TfLiteYoloV3Big App
* Enable memory import in TfLiteYoloV3Big App
* Add isMemoryManaged flag to Concat and Splitter layers

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I7e00f5da2a016c09d480b744fb17ea5611af8365
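For context, a minimal sketch (not part of the patch) of what the app-side change amounts to: loading an optimized network with memory import/export requested through INetworkProperties. The helper name LoadWithImport is hypothetical; the ArmNN calls mirror the LoadModel change in the diff below.

    // Sketch, assuming ArmNN of this era: INetworkProperties takes
    // (importEnabled, exportEnabled); both are driven by one flag here,
    // exactly as the patched LoadModel does.
    #include <armnn/ArmNN.hpp>
    #include <string>
    #include <utility>

    armnn::Status LoadWithImport(armnn::IRuntime& runtime,
                                 armnn::IOptimizedNetworkPtr optimizedModel,
                                 armnn::NetworkId& networkId,
                                 bool enableImport)
    {
        std::string errorMessage;
        armnn::INetworkProperties modelProps(enableImport, enableImport);
        return runtime.LoadNetwork(networkId, std::move(optimizedModel),
                                   errorMessage, modelProps);
    }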
-rw-r--r--  src/armnn/layers/ConcatLayer.cpp                       | 13
-rw-r--r--  src/armnn/layers/ConcatLayer.hpp                       |  2
-rw-r--r--  src/armnn/layers/SplitterLayer.cpp                     | 13
-rw-r--r--  src/armnn/layers/SplitterLayer.hpp                     |  2
-rw-r--r--  tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp  | 10
5 files changed, 22 insertions(+), 18 deletions(-)
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 5b6d25256b..0118426954 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -36,12 +36,14 @@ std::unique_ptr<IWorkload> ConcatLayer::CreateWorkload(const IWorkloadFactory& f
}
template<typename FactoryType>
-void ConcatLayer::CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory)
+void ConcatLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
+ const FactoryType& factory,
+ bool isMemoryManaged)
{
//If sub tensors are supported then the concat
//just needs to make sure that the outputs of the prev layer
//are made subtensors of the output of the concat layer.
- m_OutputHandlers[0].CreateTensorHandles(factory);
+ m_OutputHandlers[0].CreateTensorHandles(factory, isMemoryManaged);
if (factory.SupportsSubTensors())
{
@@ -168,21 +170,20 @@ void ConcatLayer::CreateTensors(const TensorHandleFactoryRegistry& registry, con
void ConcatLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
const IWorkloadFactory& workloadFactory,
- const bool IsMemoryManaged)
+ const bool isMemoryManaged)
{
- IgnoreUnused(IsMemoryManaged);
OutputSlot& slot = GetOutputSlot(0);
ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
if (factoryId == ITensorHandleFactory::LegacyFactoryId)
{
- CreateTensors(registry, workloadFactory);
+ CreateTensors(registry, workloadFactory, isMemoryManaged);
}
else
{
ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
ARMNN_ASSERT(handleFactory);
- CreateTensors(registry, *handleFactory);
+ CreateTensors(registry, *handleFactory, isMemoryManaged);
}
}
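The change above threads isMemoryManaged through the templated CreateTensors so that both factory paths (the legacy IWorkloadFactory and a registered ITensorHandleFactory) honour it; previously the flag was discarded via IgnoreUnused, so Concat outputs were always memory-managed and could not take part in import. A stripped-down illustration of the dispatch pattern, using hypothetical stand-in factory types rather than the ArmNN ones:

    // Toy illustration only: two unrelated factory types, one templated
    // helper, and a single flag forwarded to handle creation -- the same
    // shape as ConcatLayer::CreateTensors / CreateTensorHandles above.
    #include <iostream>

    struct LegacyFactory
    {
        void CreateTensorHandles(bool isMemoryManaged) const
        { std::cout << "legacy handle, managed=" << isMemoryManaged << '\n'; }
    };

    struct RegisteredFactory
    {
        void CreateTensorHandles(bool isMemoryManaged) const
        { std::cout << "registry handle, managed=" << isMemoryManaged << '\n'; }
    };

    template <typename FactoryType>
    void CreateTensors(const FactoryType& factory, bool isMemoryManaged)
    {
        // Forward the flag instead of dropping it (the pre-patch behaviour).
        factory.CreateTensorHandles(isMemoryManaged);
    }

    int main()
    {
        CreateTensors(LegacyFactory{},     /*isMemoryManaged=*/true);
        CreateTensors(RegisteredFactory{}, /*isMemoryManaged=*/false);
    }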
diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp
index eaa5c15a9c..3d9ba1815e 100644
--- a/src/armnn/layers/ConcatLayer.hpp
+++ b/src/armnn/layers/ConcatLayer.hpp
@@ -56,7 +56,7 @@ protected:
private:
template <typename FactoryType>
- void CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory);
+ void CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory, bool isMemoryManaged);
};
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 75fc5378db..e5c9903e2f 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -33,7 +33,9 @@ std::unique_ptr<IWorkload> SplitterLayer::CreateWorkload(const IWorkloadFactory&
}
template<typename FactoryType>
-void SplitterLayer::CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory)
+void SplitterLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
+ const FactoryType& factory,
+ bool isMemoryManaged)
{
//If sub tensors are supported then all the "splitter" needs to do is to
//set the outputs to be appropriate sub tensors of the input.
@@ -166,28 +168,27 @@ void SplitterLayer::CreateTensors(const TensorHandleFactoryRegistry& registry, c
{
for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
{
- m_OutputHandlers[i].CreateTensorHandles(factory);
+ m_OutputHandlers[i].CreateTensorHandles(factory, isMemoryManaged);
}
}
}
void SplitterLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
const IWorkloadFactory& workloadFactory,
- const bool IsMemoryManaged)
+ const bool isMemoryManaged)
{
- IgnoreUnused(IsMemoryManaged);
OutputSlot& slot = GetOutputSlot(0);
ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
if (factoryId == ITensorHandleFactory::LegacyFactoryId)
{
- CreateTensors(registry, workloadFactory);
+ CreateTensors(registry, workloadFactory, isMemoryManaged);
}
else
{
ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
ARMNN_ASSERT(handleFactory);
- CreateTensors(registry, *handleFactory);
+ CreateTensors(registry, *handleFactory, isMemoryManaged);
}
}
diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp
index ae725b9ad0..9999009175 100644
--- a/src/armnn/layers/SplitterLayer.hpp
+++ b/src/armnn/layers/SplitterLayer.hpp
@@ -57,7 +57,7 @@ protected:
private:
template <typename FactoryType>
- void CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory);
+ void CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory, bool isMemoryManaged);
};
} // namespace
diff --git a/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp b/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
index bf3578c37e..fcc21771cc 100644
--- a/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
+++ b/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
@@ -101,7 +101,8 @@ int LoadModel(const char* filename,
ITfLiteParser& parser,
IRuntime& runtime,
NetworkId& networkId,
- const std::vector<BackendId>& backendPreferences)
+ const std::vector<BackendId>& backendPreferences,
+ bool enableImport = false)
{
std::ifstream stream(filename, std::ios::in | std::ios::binary);
if (!stream.is_open())
@@ -125,10 +126,10 @@ int LoadModel(const char* filename,
return OPTIMIZE_NETWORK_ERROR;
}
- // Load backbone model into runtime
+ // Load model into runtime
{
std::string errorMessage;
- INetworkProperties modelProps;
+ INetworkProperties modelProps(enableImport, enableImport);
Status status = runtime.LoadNetwork(networkId, std::move(optimizedModel), errorMessage, modelProps);
if (status != Status::Success)
{
@@ -346,7 +347,8 @@ int main(int argc, char* argv[])
// Load detector model
ARMNN_LOG(info) << "Loading detector...";
NetworkId detectorId;
- CHECK_OK(LoadModel(progArgs.detectorDir.c_str(), *parser, *runtime, detectorId, progArgs.prefBackendsDetector));
+ CHECK_OK(LoadModel(
+ progArgs.detectorDir.c_str(), *parser, *runtime, detectorId, progArgs.prefBackendsDetector, true));
auto detectIn0Id = parser->GetNetworkInputBindingInfo(0, "input_to_detector_1");
auto detectIn1Id = parser->GetNetworkInputBindingInfo(0, "input_to_detector_2");
auto detectIn2Id = parser->GetNetworkInputBindingInfo(0, "input_to_detector_3");
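With import enabled, EnqueueWorkload can then operate directly on caller-owned buffers rather than copying into ArmNN-managed memory. A minimal follow-on sketch, assuming one input and one output binding and buffers that meet the backend's alignment requirements for import (RunOnce and the float buffers are illustrative, not part of the test):

    // Sketch: running the imported-memory network. BindingPointInfo pairs
    // come from ITfLiteParser::GetNetworkInputBindingInfo /
    // GetNetworkOutputBindingInfo, as used in the test above.
    #include <armnn/ArmNN.hpp>

    void RunOnce(armnn::IRuntime& runtime,
                 armnn::NetworkId networkId,
                 const armnn::BindingPointInfo& inputBinding,
                 const armnn::BindingPointInfo& outputBinding,
                 const float* inputData,
                 float* outputData)
    {
        armnn::InputTensors inputs
        {
            { inputBinding.first, armnn::ConstTensor(inputBinding.second, inputData) }
        };
        armnn::OutputTensors outputs
        {
            { outputBinding.first, armnn::Tensor(outputBinding.second, outputData) }
        };
        // With INetworkProperties(true, true) the backend imports these
        // pointers instead of copying, so results land in outputData in place.
        runtime.EnqueueWorkload(networkId, inputs, outputs);
    }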