aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFrancis Murtagh <francis.murtagh@arm.com>2021-04-29 14:23:04 +0100
committerFrancis Murtagh <francis.murtagh@arm.com>2021-04-29 14:55:27 +0000
commit73d3e2e1616ba5dcdb0a190afba2463742bd4fcc (patch)
tree5e03f174a763c275d5874c804996048fb0b505ab
parent4df97eb257d3fc29b7431d9cb8a054b21d5a7448 (diff)
downloadarmnn-73d3e2e1616ba5dcdb0a190afba2463742bd4fcc.tar.gz
IVGCVSW-5819 5820 5821 Add MemorySourceFlags to TensorHandleFactoryRegistry::GetFactory
* Modify Layer::CreateTensorHandles to include MemorySource * Modify INetworkProperties to add MemorySource * Disable Neon/Cl fallback tests until full import implementation complete Change-Id: Ia4fff6ea3d4bf6afca33aae358125ccaec7f9a38 Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
-rw-r--r--delegate/src/armnn_delegate.cpp4
-rw-r--r--delegate/src/test/DelegateOptionsTest.cpp2
-rw-r--r--include/armnn/IRuntime.hpp29
-rw-r--r--include/armnn/MemorySources.hpp10
-rw-r--r--include/armnn/Types.hpp9
-rw-r--r--src/armnn/Layer.cpp13
-rw-r--r--src/armnn/Layer.hpp3
-rw-r--r--src/armnn/LoadedNetwork.cpp12
-rw-r--r--src/armnn/Runtime.cpp2
-rw-r--r--src/armnn/layers/ConcatLayer.cpp13
-rw-r--r--src/armnn/layers/ConcatLayer.hpp4
-rw-r--r--src/armnn/layers/OutputLayer.hpp5
-rw-r--r--src/armnn/layers/SplitterLayer.cpp13
-rw-r--r--src/armnn/layers/SplitterLayer.hpp4
-rw-r--r--src/backends/backendsCommon/TensorHandleFactoryRegistry.cpp14
-rw-r--r--src/backends/backendsCommon/TensorHandleFactoryRegistry.hpp4
-rw-r--r--src/backends/backendsCommon/test/EndToEndTestImpl.hpp18
-rw-r--r--src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp4
-rw-r--r--src/backends/cl/test/ClFallbackTests.cpp10
-rw-r--r--src/backends/neon/test/NeonFallbackTests.cpp21
-rw-r--r--tests/InferenceModel.hpp4
-rw-r--r--tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp5
22 files changed, 145 insertions, 58 deletions
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
index 4c625a9867..e637ca6ebf 100644
--- a/delegate/src/armnn_delegate.cpp
+++ b/delegate/src/armnn_delegate.cpp
@@ -365,7 +365,9 @@ ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteContext* tfLiteContext,
armnn::Status loadingStatus;
if (delegate->m_Options.GetOptimizerOptions().m_ImportEnabled)
{
- armnn::INetworkProperties networkProperties(true, true);
+ armnn::INetworkProperties networkProperties(false,
+ armnn::MemorySource::Malloc,
+ armnn::MemorySource::Malloc);
loadingStatus = delegate->m_Runtime->LoadNetwork(networkId,
std::move(optNet),
errorMessage,
diff --git a/delegate/src/test/DelegateOptionsTest.cpp b/delegate/src/test/DelegateOptionsTest.cpp
index 6024754be8..8f6027df58 100644
--- a/delegate/src/test/DelegateOptionsTest.cpp
+++ b/delegate/src/test/DelegateOptionsTest.cpp
@@ -93,7 +93,7 @@ TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction")
callback = true;
};
- armnn::INetworkProperties networkProperties;
+ armnn::INetworkProperties networkProperties(false, armnn::MemorySource::Undefined, armnn::MemorySource::Undefined);
armnnDelegate::DelegateOptions delegateOptions(backends,
optimizerOptions,
armnn::EmptyOptional(),
diff --git a/include/armnn/IRuntime.hpp b/include/armnn/IRuntime.hpp
index fc203e67e4..55c57974dc 100644
--- a/include/armnn/IRuntime.hpp
+++ b/include/armnn/IRuntime.hpp
@@ -28,14 +28,35 @@ using IRuntimePtr = std::unique_ptr<IRuntime, void(*)(IRuntime* runtime)>;
struct INetworkProperties
{
- INetworkProperties(bool importEnabled = false, bool exportEnabled = false, bool asyncEnabled = false)
- : m_ImportEnabled(importEnabled),
- m_ExportEnabled(exportEnabled),
- m_AsyncEnabled(asyncEnabled) {}
+ ARMNN_DEPRECATED_MSG("Please use INetworkProperties constructor with MemorySource argument")
+ INetworkProperties(bool importEnabled = false,
+ bool exportEnabled = false,
+ bool asyncEnabled = false)
+ : m_ImportEnabled(importEnabled)
+ , m_ExportEnabled(exportEnabled)
+ , m_AsyncEnabled(asyncEnabled)
+ , m_InputSource(MemorySource::Undefined)
+ , m_OutputSource(MemorySource::Undefined)
+ {}
+
+ INetworkProperties(bool asyncEnabled,
+ MemorySource m_InputSource,
+ MemorySource m_OutputSource)
+ : m_ImportEnabled(m_InputSource != MemorySource::Undefined)
+ , m_ExportEnabled(m_OutputSource != MemorySource::Undefined)
+ , m_AsyncEnabled(asyncEnabled)
+ , m_InputSource(m_InputSource)
+ , m_OutputSource(m_OutputSource)
+ {}
+ /// Deprecated and will be removed in future release.
const bool m_ImportEnabled;
+ /// Deprecated and will be removed in future release.
const bool m_ExportEnabled;
+
const bool m_AsyncEnabled;
+ const MemorySource m_InputSource;
+ const MemorySource m_OutputSource;
virtual ~INetworkProperties() {}
};
diff --git a/include/armnn/MemorySources.hpp b/include/armnn/MemorySources.hpp
index 53890982d4..53746a3bc3 100644
--- a/include/armnn/MemorySources.hpp
+++ b/include/armnn/MemorySources.hpp
@@ -5,19 +5,13 @@
#pragma once
+#include <armnn/Types.hpp>
+
#include <type_traits>
namespace armnn
{
-enum class MemorySource
-{
- Undefined = 0,
- Malloc = 1,
- DmaBuf = 2,
- DmaBufProtected = 4
-};
-
using MemorySourceFlags = unsigned int;
template<typename T>
diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index 2fd40b84dd..bc41003c57 100644
--- a/include/armnn/Types.hpp
+++ b/include/armnn/Types.hpp
@@ -182,6 +182,15 @@ enum class ShapeInferenceMethod
InferAndValidate = 1
};
+/// Define the Memory Source to reduce copies
+enum class MemorySource : uint32_t
+{
+ Undefined = 0,
+ Malloc = 1,
+ DmaBuf = 2,
+ DmaBufProtected = 4
+};
+
/// Each backend should implement an IBackend.
class IBackend
{
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index 13d834f6ae..782f1939b0 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -249,7 +249,8 @@ void Layer::SetAdditionalInfo(QueueDescriptor& descriptor) const
void Layer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
const IWorkloadFactory& workloadFactory,
- const bool IsMemoryManaged)
+ const bool IsMemoryManaged,
+ MemorySource memSource)
{
for (unsigned int idx=0; idx < GetNumOutputSlots(); idx++)
{
@@ -264,7 +265,15 @@ void Layer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
}
else
{
- ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
+ ITensorHandleFactory* handleFactory;
+ if (memSource == MemorySource::Undefined )
+ {
+ handleFactory = registry.GetFactory(factoryId);
+ }
+ else
+ {
+ handleFactory = registry.GetFactory(factoryId, memSource);
+ }
ARMNN_ASSERT(handleFactory);
handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
}
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index 5ab6b3152f..d43545c01f 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -275,7 +275,8 @@ public:
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
const IWorkloadFactory& factory,
- const bool IsMemoryManaged = true);
+ const bool IsMemoryManaged = true,
+ MemorySource memSource = MemorySource::Undefined);
/// Creates a dynamically-allocated copy of this layer.
/// @param graph - The Graph into which this Layer is being cloned.
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 85451cb0d8..5c5a963212 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -174,8 +174,10 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
{
// If IsImportEnabled is true then we need to set IsMemoryManaged
// to false when creating TensorHandles
- layer->CreateTensorHandles(m_TensorHandleFactoryRegistry, workloadFactory,
- !m_NetworkProperties.m_ImportEnabled);
+ layer->CreateTensorHandles(m_TensorHandleFactoryRegistry,
+ workloadFactory,
+ !m_NetworkProperties.m_ImportEnabled,
+ m_NetworkProperties.m_InputSource);
break;
}
default:
@@ -186,8 +188,10 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
(layer->GetOutputSlots()[0].GetNumConnections() == 1) &&
(layer->GetOutputSlots()[0].GetConnection(0)->GetOwningLayer().GetType() == LayerType::Output))
{
- layer->CreateTensorHandles(m_TensorHandleFactoryRegistry, workloadFactory,
- !m_NetworkProperties.m_ExportEnabled);
+ layer->CreateTensorHandles(m_TensorHandleFactoryRegistry,
+ workloadFactory,
+ !m_NetworkProperties.m_ExportEnabled,
+ m_NetworkProperties.m_OutputSource);
}
else
{
diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp
index 91a21d4b53..1dd86a61ce 100644
--- a/src/armnn/Runtime.cpp
+++ b/src/armnn/Runtime.cpp
@@ -128,7 +128,7 @@ Status RuntimeImpl::LoadNetwork(NetworkId& networkIdOut,
IOptimizedNetworkPtr inNetwork,
std::string& errorMessage)
{
- INetworkProperties networkProperties;
+ INetworkProperties networkProperties(false, MemorySource::Undefined, MemorySource::Undefined);
return LoadNetwork(networkIdOut, std::move(inNetwork), errorMessage, networkProperties);
}
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 238fdb66d9..3a20e1b3f6 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -179,7 +179,8 @@ void ConcatLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
void ConcatLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
const IWorkloadFactory& workloadFactory,
- const bool isMemoryManaged)
+ const bool isMemoryManaged,
+ MemorySource memSource)
{
OutputSlot& slot = GetOutputSlot(0);
ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
@@ -190,7 +191,15 @@ void ConcatLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registr
}
else
{
- ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
+ ITensorHandleFactory* handleFactory;
+ if (memSource == MemorySource::Undefined)
+ {
+ handleFactory = registry.GetFactory(factoryId);
+ }
+ else
+ {
+ handleFactory = registry.GetFactory(factoryId, memSource);
+ }
ARMNN_ASSERT(handleFactory);
CreateTensors(registry, *handleFactory, isMemoryManaged);
}
diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp
index 3d9ba1815e..6a43318382 100644
--- a/src/armnn/layers/ConcatLayer.hpp
+++ b/src/armnn/layers/ConcatLayer.hpp
@@ -24,9 +24,11 @@ public:
/// @param [in] registry Contains all the registered tensor handle factories available for use.
/// @param [in] factory The workload factory which will create the workload.
/// @param [in] IsMemoryManaged Determine whether or not to assign a memory manager during creation
+ /// @param [in] MemorySource Determine the source of memory e.g Malloc
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
const IWorkloadFactory& factory,
- const bool IsMemoryManaged = true) override;
+ const bool IsMemoryManaged = true,
+ MemorySource memSource = MemorySource::Undefined) override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index 6315e25da1..fc6a8aa6b2 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -26,9 +26,10 @@ public:
/// @param [in] IsMemoryManaged Determine whether or not to assign a memory manager during creation
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
const IWorkloadFactory& factory,
- const bool IsMemoryManaged = true) override
+ const bool isMemoryManaged = true,
+ MemorySource memSource = MemorySource::Undefined) override
{
- IgnoreUnused(registry, factory, IsMemoryManaged);
+ IgnoreUnused(registry, factory, isMemoryManaged, memSource);
}
/// Creates a dynamically-allocated copy of this layer.
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 5e6622e13a..adef9aa1a2 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -177,7 +177,8 @@ void SplitterLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
void SplitterLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
const IWorkloadFactory& workloadFactory,
- const bool isMemoryManaged)
+ const bool isMemoryManaged,
+ MemorySource memSource)
{
OutputSlot& slot = GetOutputSlot(0);
ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
@@ -188,7 +189,15 @@ void SplitterLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& regis
}
else
{
- ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
+ ITensorHandleFactory* handleFactory;
+ if (memSource == MemorySource::Undefined)
+ {
+ handleFactory = registry.GetFactory(factoryId);
+ }
+ else
+ {
+ handleFactory = registry.GetFactory(factoryId, memSource);
+ }
ARMNN_ASSERT(handleFactory);
CreateTensors(registry, *handleFactory, isMemoryManaged);
}
diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp
index 9999009175..075b136da9 100644
--- a/src/armnn/layers/SplitterLayer.hpp
+++ b/src/armnn/layers/SplitterLayer.hpp
@@ -24,10 +24,10 @@ public:
/// @param [in] registry Contains all the registered tensor handle factories available for use.
/// @param [in] factory The workload factory which will create the workload.
/// @param [in] IsMemoryManaged Determine whether or not to assign a memory manager during creation
- //virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override;
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
const IWorkloadFactory& factory,
- const bool IsMemoryManaged = true) override;
+ const bool IsMemoryManaged = true,
+ MemorySource memSource = MemorySource::Undefined) override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/backends/backendsCommon/TensorHandleFactoryRegistry.cpp b/src/backends/backendsCommon/TensorHandleFactoryRegistry.cpp
index 0670461b54..cc8a1361a3 100644
--- a/src/backends/backendsCommon/TensorHandleFactoryRegistry.cpp
+++ b/src/backends/backendsCommon/TensorHandleFactoryRegistry.cpp
@@ -49,6 +49,20 @@ ITensorHandleFactory* TensorHandleFactoryRegistry::GetFactory(ITensorHandleFacto
return nullptr;
}
+ITensorHandleFactory* TensorHandleFactoryRegistry::GetFactory(ITensorHandleFactory::FactoryId id,
+ MemorySource memSource) const
+{
+ for (auto& factory : m_Factories)
+ {
+ if (factory->GetId() == id && factory->GetImportFlags() == static_cast<MemorySourceFlags>(memSource))
+ {
+ return factory.get();
+ }
+ }
+
+ return nullptr;
+}
+
void TensorHandleFactoryRegistry::AquireMemory()
{
for (auto& mgr : m_MemoryManagers)
diff --git a/src/backends/backendsCommon/TensorHandleFactoryRegistry.hpp b/src/backends/backendsCommon/TensorHandleFactoryRegistry.hpp
index e9e76e73a6..525db56216 100644
--- a/src/backends/backendsCommon/TensorHandleFactoryRegistry.hpp
+++ b/src/backends/backendsCommon/TensorHandleFactoryRegistry.hpp
@@ -35,6 +35,10 @@ public:
/// Returns nullptr if not found
ITensorHandleFactory* GetFactory(ITensorHandleFactory::FactoryId id) const;
+ /// Overload of above allowing specification of Memory Source
+ ITensorHandleFactory* GetFactory(ITensorHandleFactory::FactoryId id,
+ MemorySource memSource) const;
+
/// Aquire memory required for inference
void AquireMemory();
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 3a757d0c59..a5fe8c6a62 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -209,7 +209,7 @@ inline void ImportNonAlignedInputPointerTest(std::vector<BackendId> backends)
NetworkId netId;
std::string ignoredErrorMessage;
// Enable Importing
- INetworkProperties networkProperties(true, false);
+ INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Undefined);
runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
// Creates structures for input & output
@@ -274,7 +274,7 @@ inline void ExportNonAlignedOutputPointerTest(std::vector<BackendId> backends)
NetworkId netId;
std::string ignoredErrorMessage;
// Enable Importing and Exporting
- INetworkProperties networkProperties(true, true);
+ INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Malloc);
runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
// Creates structures for input & output
@@ -345,7 +345,7 @@ inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
NetworkId netId;
std::string ignoredErrorMessage;
// Enable Importing
- INetworkProperties networkProperties(true, true);
+ INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Malloc);
runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
// Creates structures for input & output
@@ -428,7 +428,9 @@ inline void ImportOnlyWorkload(std::vector<BackendId> backends)
// Load it into the runtime. It should pass.
NetworkId netId;
std::string ignoredErrorMessage;
- INetworkProperties networkProperties(true, false);
+
+ INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Undefined);
+
BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet),ignoredErrorMessage, networkProperties)
== Status::Success);
@@ -516,7 +518,7 @@ inline void ExportOnlyWorkload(std::vector<BackendId> backends)
// Load it into the runtime. It should pass.
NetworkId netId;
std::string ignoredErrorMessage;
- INetworkProperties networkProperties(false, true);
+ INetworkProperties networkProperties(false, MemorySource::Undefined, MemorySource::Malloc);
BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet),ignoredErrorMessage, networkProperties)
== Status::Success);
@@ -603,7 +605,9 @@ inline void ImportAndExportWorkload(std::vector<BackendId> backends)
// Load it into the runtime. It should pass.
NetworkId netId;
std::string ignoredErrorMessage;
- INetworkProperties networkProperties(true, true);
+
+ INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Malloc);
+
BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet),ignoredErrorMessage, networkProperties)
== Status::Success);
@@ -694,7 +698,7 @@ inline void ExportOutputWithSeveralOutputSlotConnectionsTest(std::vector<Backend
NetworkId netId;
std::string ignoredErrorMessage;
// Enable Importing
- INetworkProperties networkProperties(true, true);
+ INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Malloc);
runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
// Creates structures for input & output
diff --git a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
index 16b10c88ac..b20ff4f142 100644
--- a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
+++ b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
@@ -42,7 +42,7 @@ void AsyncThreadedEndToEndTestImpl(INetworkPtr network,
// Creates AsyncNetwork
NetworkId networkId = 0;
std::string errorMessage;
- const INetworkProperties networkProperties(false, false, true);
+ const INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
runtime->LoadNetwork(networkId, std::move(optNet), errorMessage, networkProperties);
std::vector<InputTensors> inputTensorsVec;
@@ -134,7 +134,7 @@ void AsyncEndToEndTestImpl(INetworkPtr network,
// Creates AsyncNetwork
NetworkId networkId = 0;
std::string errorMessage;
- const INetworkProperties networkProperties(false, false, true);
+ const INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
runtime->LoadNetwork(networkId, std::move(optNet), errorMessage, networkProperties);
InputTensors inputTensors;
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 4384ae5fec..eec3afe447 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -11,7 +11,7 @@
BOOST_AUTO_TEST_SUITE(ClFallback)
-BOOST_AUTO_TEST_CASE(ClImportEnabledFallbackToNeon)
+BOOST_AUTO_TEST_CASE(ClImportEnabledFallbackToNeon, * boost::unit_test::disabled())
{
using namespace armnn;
@@ -78,8 +78,7 @@ BOOST_AUTO_TEST_CASE(ClImportEnabledFallbackToNeon)
// Load it into the runtime. It should pass.
NetworkId netId;
std::string ignoredErrorMessage;
- INetworkProperties networkProperties(true, true);
-
+ INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Malloc);
runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
// Creates structures for input & output
@@ -259,7 +258,7 @@ BOOST_AUTO_TEST_CASE(ClImportDisabledFallbackToNeon)
BOOST_TEST(outputData == expectedOutput);
}
-BOOST_AUTO_TEST_CASE(ClImportEnabledFallbackSubgraphToNeon)
+BOOST_AUTO_TEST_CASE(ClImportEnabledFallbackSubgraphToNeon, * boost::unit_test::disabled())
{
using namespace armnn;
@@ -337,8 +336,7 @@ BOOST_AUTO_TEST_CASE(ClImportEnabledFallbackSubgraphToNeon)
// Load it into the runtime. It should pass.
NetworkId netId;
std::string ignoredErrorMessage;
- INetworkProperties networkProperties(true, true);
-
+ INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Malloc);
runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
// Creates structures for input & output
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index 2d70cc2b1b..8dc592db5d 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -83,8 +83,7 @@ BOOST_AUTO_TEST_CASE(FallbackImportToCpuAcc)
// Load it into the runtime. It should pass.
NetworkId netId;
std::string ignoredErrorMessage;
- INetworkProperties networkProperties(true, true);
-
+ INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Malloc);
runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
// Creates structures for input & output
@@ -218,7 +217,7 @@ BOOST_AUTO_TEST_CASE(FallbackPaddingCopyToCpuAcc)
// Load it into the runtime. It should pass.
NetworkId netId;
std::string ignoredErrorMessage;
- INetworkProperties networkProperties(true, true);
+ INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Malloc);
runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
@@ -350,8 +349,8 @@ BOOST_AUTO_TEST_CASE(FallbackImportFromCpuAcc)
// Load it into the runtime. It should pass.
NetworkId netId;
std::string ignoredErrorMessage;
- INetworkProperties networkProperties(true, true);
+ INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Malloc);
runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
// Creates structures for input & output
@@ -485,7 +484,7 @@ BOOST_AUTO_TEST_CASE(FallbackPaddingCopyFromCpuAcc)
// Load it into the runtime. It should pass.
NetworkId netId;
std::string ignoredErrorMessage;
- INetworkProperties networkProperties(true, true);
+ INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Malloc);
runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
@@ -615,7 +614,7 @@ BOOST_AUTO_TEST_CASE(FallbackDisableImportFromCpuAcc)
// Load it into the runtime. It should pass.
NetworkId netId;
std::string ignoredErrorMessage;
- INetworkProperties networkProperties(false, false);
+ INetworkProperties networkProperties(false, MemorySource::Undefined, MemorySource::Undefined);
runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
@@ -678,7 +677,7 @@ BOOST_AUTO_TEST_CASE(FallbackDisableImportFromCpuAcc)
}
#if defined(ARMCOMPUTECL_ENABLED)
-BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackToCl)
+BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackToCl, * boost::unit_test::disabled())
{
using namespace armnn;
@@ -745,7 +744,8 @@ BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackToCl)
// Load it into the runtime. It should pass.
NetworkId netId;
std::string ignoredErrorMessage;
- INetworkProperties networkProperties(true, true);
+
+ INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Malloc);
runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
@@ -926,7 +926,7 @@ BOOST_AUTO_TEST_CASE(NeonImportDisabledFallbackToCl)
BOOST_TEST(outputData == expectedOutput);
}
-BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackSubgraphToCl)
+BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackSubgraphToCl, * boost::unit_test::disabled())
{
using namespace armnn;
@@ -1004,7 +1004,8 @@ BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackSubgraphToCl)
// Load it into the runtime. It should pass.
NetworkId netId;
std::string ignoredErrorMessage;
- INetworkProperties networkProperties(true, true);
+
+ INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Malloc);
runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 88c704c10e..3429598249 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -479,7 +479,9 @@ public:
ARMNN_SCOPED_HEAP_PROFILING("LoadNetwork");
const auto loading_start_time = armnn::GetTimeNow();
- armnn::INetworkProperties networkProperties(false, false, params.m_AsyncEnabled);
+ armnn::INetworkProperties networkProperties(params.m_AsyncEnabled,
+ armnn::MemorySource::Undefined,
+ armnn::MemorySource::Undefined);
std::string errorMessage;
ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optNet), errorMessage, networkProperties);
diff --git a/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp b/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
index 8ce7357962..3453fdd70b 100644
--- a/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
+++ b/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
@@ -147,7 +147,10 @@ int LoadModel(const char* filename,
// Load model into runtime
{
std::string errorMessage;
- INetworkProperties modelProps(options.m_ImportEnabled, options.m_ImportEnabled);
+
+ armnn::MemorySource memSource = options.m_ImportEnabled ? armnn::MemorySource::Malloc
+ : armnn::MemorySource::Undefined;
+ INetworkProperties modelProps(false, memSource, memSource);
Status status = runtime.LoadNetwork(networkId, std::move(optimizedModel), errorMessage, modelProps);
if (status != Status::Success)
{