Diffstat (limited to 'src/armnn')
 -rw-r--r--  src/armnn/LayerSupport.cpp   |  2
 -rw-r--r--  src/armnn/LoadedNetwork.cpp  |  8
 -rw-r--r--  src/armnn/LoadedNetwork.hpp  |  3
 -rw-r--r--  src/armnn/Runtime.cpp        | 52
 -rw-r--r--  src/armnn/Runtime.hpp        | 12
 5 files changed, 32 insertions(+), 45 deletions(-)
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 846da6086e..249c8f4654 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -39,7 +39,7 @@ void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxL
bool isSupported; \
try { \
auto factoryFunc = LayerSupportRegistryInstance().GetFactory(backendId); \
- auto layerSupportObject = factoryFunc(); \
+ auto layerSupportObject = factoryFunc(EmptyInitializer()); \
isSupported = layerSupportObject->func(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
} catch (InvalidArgumentException e) { \
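
The hunk above swaps the zero-argument layer-support factory for one taking an EmptyInitializer tag. As a reading aid, here is a minimal sketch of what the macro body boils down to once expanded, written as a plain function; the IsInputSupported query and the header path are illustrative assumptions, only the registry call and EmptyInitializer come from this diff.

    // Sketch of the lookup pattern the macro above expands to. Only the
    // registry lookup and EmptyInitializer come from this diff; the
    // IsInputSupported query and header path are illustrative assumptions.
    #include <armnn/LayerSupport.hpp>

    bool IsInputSupportedOn(const armnn::BackendId& backendId, const armnn::TensorInfo& input)
    {
        using namespace armnn;
        std::string reason;
        auto factoryFunc = LayerSupportRegistryInstance().GetFactory(backendId);
        auto layerSupportObject = factoryFunc(EmptyInitializer());
        return layerSupportObject->IsInputSupported(input, Optional<std::string&>(reason));
    }
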
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 40137779f6..616a0327fe 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -38,6 +38,7 @@ std::string ToErrorMessage(const char * prefix, const ExceptionType & error)
} // anonymous
std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
+ const IRuntime::CreationOptions& options,
std::string & errorMessage)
{
std::unique_ptr<LoadedNetwork> loadedNetwork;
@@ -52,7 +53,7 @@ std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<
try
{
- loadedNetwork.reset(new LoadedNetwork(std::move(net)));
+ loadedNetwork.reset(new LoadedNetwork(std::move(net), options));
}
catch (const armnn::RuntimeException& error)
{
@@ -70,7 +71,8 @@ std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<
return loadedNetwork;
}
-LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net)
+LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
+ const IRuntime::CreationOptions& options)
: m_OptimizedNetwork(std::move(net))
, m_WorkingMemLock(m_WorkingMemMutex, std::defer_lock)
{
@@ -89,7 +91,7 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net)
if (m_Backends.count(backend) == 0)
{
auto createBackend = BackendRegistryInstance().GetFactory(backend);
- auto it = m_Backends.emplace(std::make_pair(backend, createBackend()));
+ auto it = m_Backends.emplace(std::make_pair(backend, createBackend(EmptyInitializer())));
m_WorkloadFactories.emplace(std::make_pair(backend,
it.first->second->CreateWorkloadFactory()));
}
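
For context, the createBackend(EmptyInitializer()) call above implies that backend factories are now registered with a signature taking the initializer tag. A purely hypothetical registration sketch follows; the StaticRegistryInitializer helper, the RefBackend type and the pointer alias are assumptions, and only the factory-parameter shape mirrors this hunk.

    // Hypothetical backend registration matching the createBackend(EmptyInitializer())
    // call site above. The helper type, RefBackend and IBackendUniquePtr are
    // assumptions; only the (const EmptyInitializer&) parameter mirrors this diff.
    static StaticRegistryInitializer<BackendRegistry> g_RegisterRefBackend
    {
        BackendRegistryInstance(),
        "CpuRef",
        [](const EmptyInitializer&)
        {
            return IBackendUniquePtr(new RefBackend());
        }
    };
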
diff --git a/src/armnn/LoadedNetwork.hpp b/src/armnn/LoadedNetwork.hpp
index 51eb04f3df..21de1440f1 100644
--- a/src/armnn/LoadedNetwork.hpp
+++ b/src/armnn/LoadedNetwork.hpp
@@ -40,6 +40,7 @@ public:
Status EnqueueWorkload(const InputTensors& inputTensors, const OutputTensors& outputTensors);
static std::unique_ptr<LoadedNetwork> MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
+ const IRuntime::CreationOptions& options,
std::string & errorMessage);
// NOTE we return by reference as the purpose of this method is only to provide
@@ -51,7 +52,7 @@ public:
void FreeWorkingMemory();
private:
- LoadedNetwork(std::unique_ptr<OptimizedNetwork> net);
+ LoadedNetwork(std::unique_ptr<OptimizedNetwork> net, const IRuntime::CreationOptions& options);
void EnqueueInput(const BindableLayer& layer, ITensorHandle* tensorHandle, const TensorInfo& tensorInfo);
diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp
index e84cbe0a60..a0aca6ebc7 100644
--- a/src/armnn/Runtime.cpp
+++ b/src/armnn/Runtime.cpp
@@ -6,15 +6,10 @@
#include <armnn/Version.hpp>
#include <backends/BackendRegistry.hpp>
+#include <backends/BackendContextRegistry.hpp>
#include <iostream>
-#ifdef ARMCOMPUTECL_ENABLED
-#include <arm_compute/core/CL/OpenCL.h>
-#include <arm_compute/core/CL/CLKernelLibrary.h>
-#include <arm_compute/runtime/CL/CLScheduler.h>
-#endif
-
#include <boost/log/trivial.hpp>
#include <boost/polymorphic_cast.hpp>
@@ -57,6 +52,7 @@ Status Runtime::LoadNetwork(NetworkId& networkIdOut,
IOptimizedNetwork* rawNetwork = inNetwork.release();
unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork(
std::unique_ptr<OptimizedNetwork>(boost::polymorphic_downcast<OptimizedNetwork*>(rawNetwork)),
+ m_Options,
errorMessage);
if (!loadedNetwork)
@@ -78,24 +74,6 @@ Status Runtime::LoadNetwork(NetworkId& networkIdOut,
Status Runtime::UnloadNetwork(NetworkId networkId)
{
-#ifdef ARMCOMPUTECL_ENABLED
- if (arm_compute::CLScheduler::get().context()() != NULL)
- {
- // Waits for all queued CL requests to finish before unloading the network they may be using.
- try
- {
- // Coverity fix: arm_compute::CLScheduler::sync() may throw an exception of type cl::Error.
- arm_compute::CLScheduler::get().sync();
- }
- catch (const cl::Error&)
- {
- BOOST_LOG_TRIVIAL(warning) << "WARNING: Runtime::UnloadNetwork(): an error occurred while waiting for "
- "the queued CL requests to finish";
- return Status::Failure;
- }
- }
-#endif
-
{
std::lock_guard<std::mutex> lockGuard(m_Mutex);
@@ -104,14 +82,6 @@ Status Runtime::UnloadNetwork(NetworkId networkId)
BOOST_LOG_TRIVIAL(warning) << "WARNING: Runtime::UnloadNetwork(): " << networkId << " not found!";
return Status::Failure;
}
-
-#ifdef ARMCOMPUTECL_ENABLED
- if (arm_compute::CLScheduler::get().context()() != NULL && m_LoadedNetworks.empty())
- {
- // There are no loaded networks left, so clear the CL cache to free up memory
- m_ClContextControl.ClearClCache();
- }
-#endif
}
BOOST_LOG_TRIVIAL(debug) << "Runtime::UnloadNetwork(): Unloaded network with ID: " << networkId;
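
The OpenCL-specific synchronisation and cache clearing removed above no longer live in the Runtime itself. As an entirely hypothetical sketch of where such logic could sit under the new scheme, a GpuAcc backend context might perform the sync when it is torn down; the class name and the assumption that IBackendContext takes CreationOptions and has a virtual destructor are illustrative, while the CL calls are the ones removed from this function.

    // Hypothetical GpuAcc context owning the CL teardown that used to be inlined
    // in Runtime::UnloadNetwork. IBackendContext taking CreationOptions and having
    // a virtual destructor is assumed; the CL calls mirror the removed code.
    class ClBackendContext : public IBackendContext
    {
    public:
        explicit ClBackendContext(const IRuntime::CreationOptions& options)
            : IBackendContext(options)
        {}

        ~ClBackendContext() override
        {
            if (arm_compute::CLScheduler::get().context()() != NULL)
            {
                try
                {
                    // Wait for queued CL work before the context disappears.
                    arm_compute::CLScheduler::get().sync();
                }
                catch (const cl::Error&)
                {
                    BOOST_LOG_TRIVIAL(warning) << "CL sync failed while destroying the GpuAcc backend context";
                }
            }
        }
    };
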
@@ -131,12 +101,26 @@ const std::shared_ptr<IProfiler> Runtime::GetProfiler(NetworkId networkId) const
}
Runtime::Runtime(const CreationOptions& options)
- : m_ClContextControl(options.m_GpuAccTunedParameters.get(),
- options.m_EnableGpuProfiling)
+ : m_Options{options}
, m_NetworkIdCounter(0)
, m_DeviceSpec{BackendRegistryInstance().GetBackendIds()}
{
BOOST_LOG_TRIVIAL(info) << "ArmNN v" << ARMNN_VERSION << "\n";
+
+ for (const auto& id : BackendContextRegistryInstance().GetBackendIds())
+ {
+ // Store backend contexts for the supported ones
+ if (m_DeviceSpec.GetSupportedBackends().count(id) > 0)
+ {
+ // Don't throw an exception, rather return a dummy factory if not
+ // found.
+ auto factoryFun = BackendContextRegistryInstance().GetFactory(
+ id, [](const CreationOptions&) { return IBackendContextUniquePtr(); }
+ );
+
+ m_BackendContexts.emplace(std::make_pair(id, factoryFun(options)));
+ }
+ }
}
Runtime::~Runtime()
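
To close the loop on the constructor change above, the contexts it stores have to come from somewhere: a backend would register a factory whose signature matches the factoryFun(options) call. A hypothetical registration is sketched below, reusing the ClBackendContext sketched earlier; the StaticRegistryInitializer helper is an assumption, while the factory signature follows this hunk.

    // Hypothetical backend context registration picked up by the Runtime
    // constructor loop above. The registration helper is an assumption; the
    // factory signature (CreationOptions in, IBackendContextUniquePtr out)
    // follows the factoryFun(options) call in this diff.
    static StaticRegistryInitializer<BackendContextRegistry> g_RegisterClBackendContext
    {
        BackendContextRegistryInstance(),
        "GpuAcc",
        [](const IRuntime::CreationOptions& options)
        {
            return IBackendContextUniquePtr(new ClBackendContext(options));
        }
    };
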
diff --git a/src/armnn/Runtime.hpp b/src/armnn/Runtime.hpp
index 29bb6808d8..2679e3cc98 100644
--- a/src/armnn/Runtime.hpp
+++ b/src/armnn/Runtime.hpp
@@ -9,7 +9,8 @@
#include <armnn/INetwork.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/Tensor.hpp>
-#include <backends/cl/ClContextControl.hpp>
+#include <armnn/BackendId.hpp>
+#include <backends/IBackendContext.hpp>
#include <mutex>
#include <unordered_map>
@@ -85,14 +86,13 @@ private:
}
mutable std::mutex m_Mutex;
-
std::unordered_map<NetworkId, std::unique_ptr<LoadedNetwork>> m_LoadedNetworks;
-
- ClContextControl m_ClContextControl;
-
+ CreationOptions m_Options;
int m_NetworkIdCounter;
-
DeviceSpec m_DeviceSpec;
+
+ using BackendContextMap = std::unordered_map<BackendId, IBackendContextUniquePtr>;
+ BackendContextMap m_BackendContexts;
};
}
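
From the public API side nothing changes for callers: the same CreationOptions are simply fanned out to every registered backend context instead of feeding a hard-wired ClContextControl. A short usage sketch, assuming the standard armnn::IRuntime::Create entry point:

    #include <armnn/IRuntime.hpp>

    int main()
    {
        // The options handed over here now reach MakeLoadedNetwork and every
        // backend context created in Runtime's constructor.
        armnn::IRuntime::CreationOptions options;
        options.m_EnableGpuProfiling = false;

        armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);

        // ... optimize and load networks; backend contexts are destroyed with the runtime.
        return 0;
    }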