Diffstat (limited to 'src')
 src/armnn/LoadedNetwork.cpp                   | 58
 src/armnn/LoadedNetwork.hpp                   | 13
 src/armnn/Network.cpp                         |  8
 src/armnnUtils/GraphTopologicalSort.hpp       |  8
 src/backends/BackendRegistry.hpp              |  3
 src/backends/IBackendInternal.hpp             | 14
 src/backends/WorkloadFactory.cpp              | 54
 src/backends/WorkloadFactory.hpp              |  6
 src/backends/cl/ClBackend.cpp                 |  9
 src/backends/cl/ClBackend.hpp                 |  4
 src/backends/cl/ClWorkloadFactory.cpp         |  2
 src/backends/cl/ClWorkloadFactory.hpp         |  6
 src/backends/neon/NeonBackend.cpp             |  9
 src/backends/neon/NeonBackend.hpp             |  4
 src/backends/neon/NeonWorkloadFactory.cpp     |  3
 src/backends/neon/NeonWorkloadFactory.hpp     |  6
 src/backends/reference/RefBackend.cpp         |  9
 src/backends/reference/RefBackend.hpp         |  4
 src/backends/reference/RefWorkloadFactory.cpp |  3
 src/backends/reference/RefWorkloadFactory.hpp |  6
 src/backends/test/BackendRegistryTests.cpp    |  4
21 files changed, 120 insertions(+), 113 deletions(-)
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 7aa66d9b09..40137779f6 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -12,6 +12,7 @@
 #include "HeapProfiling.hpp"
 
 #include <backends/CpuTensorHandle.hpp>
+#include <backends/BackendRegistry.hpp>
 
 #include <boost/polymorphic_cast.hpp>
 #include <boost/assert.hpp>
@@ -70,8 +71,7 @@ std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<
 }
 
 LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net)
-    : m_CpuRef()
-    , m_OptimizedNetwork(std::move(net))
+    : m_OptimizedNetwork(std::move(net))
     , m_WorkingMemLock(m_WorkingMemMutex, std::defer_lock)
 {
     // Create a profiler and register it for the current thread.
@@ -79,12 +79,20 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net)
     ProfilerManager::GetInstance().RegisterProfiler(m_Profiler.get());
 
     Graph& order = m_OptimizedNetwork->GetGraph().TopologicalSort();
-    //First create tensor handlers.
+    //First create tensor handlers, backends and workload factories.
     //Handlers are created before workloads are.
     //Because workload creation can modify some of the handlers,
     //(for example the splitter and merger layers).
     for (auto&& layer : order)
     {
+        auto const& backend = layer->GetBackendId();
+        if (m_Backends.count(backend) == 0)
+        {
+            auto createBackend = BackendRegistryInstance().GetFactory(backend);
+            auto it = m_Backends.emplace(std::make_pair(backend, createBackend()));
+            m_WorkloadFactories.emplace(std::make_pair(backend,
+                                                       it.first->second->CreateWorkloadFactory()));
+        }
         layer->CreateTensorHandles(m_OptimizedNetwork->GetGraph(), GetWorkloadFactory(*layer));
     }
 
@@ -126,9 +134,10 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net)
     m_OptimizedNetwork->GetGraph().AllocateDynamicBuffers();
 
     // Finalize the workload factories before execution.
-    m_CpuRef.Finalize();
-    m_CpuAcc.Finalize();
-    m_GpuAcc.Finalize();
+    for (auto&& workloadFactory : m_WorkloadFactories)
+    {
+        workloadFactory.second->Finalize();
+    }
 }
 
 TensorInfo LoadedNetwork::GetInputTensorInfo(LayerBindingId layerId) const
@@ -164,26 +173,25 @@ const IWorkloadFactory& LoadedNetwork::GetWorkloadFactory(const Layer& layer) co
 {
     const IWorkloadFactory* workloadFactory = nullptr;
 
-    if (layer.GetBackendId() == Compute::CpuAcc)
-    {
-        workloadFactory = &m_CpuAcc;
-    }
-    else if (layer.GetBackendId() == Compute::GpuAcc)
-    {
-        workloadFactory = &m_GpuAcc;
-    }
-    else if (layer.GetBackendId() == Compute::CpuRef)
+    auto it = m_WorkloadFactories.find(layer.GetBackendId());
+    if (it == m_WorkloadFactories.end())
     {
-        workloadFactory = &m_CpuRef;
+        throw RuntimeException(
+            boost::str(
+                boost::format("No workload factory for %1% to be used for layer: %2%")
+                % layer.GetBackendId().Get()
+                % layer.GetNameStr()),
+            CHECK_LOCATION());
     }
 
+    workloadFactory = it->second.get();
+
     BOOST_ASSERT_MSG(workloadFactory, "No workload factory");
 
     std::string reasonIfUnsupported;
     BOOST_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer, {}, reasonIfUnsupported),
-                     "Factory does not support layer");
+                      "Factory does not support layer");
     boost::ignore_unused(reasonIfUnsupported);
-
     return *workloadFactory;
 }
@@ -408,9 +416,10 @@ void LoadedNetwork::AllocateWorkingMemory()
     {
         return;
     }
-    m_CpuRef.Acquire();
-    m_CpuAcc.Acquire();
-    m_GpuAcc.Acquire();
+    for (auto&& workloadFactory : m_WorkloadFactories)
+    {
+        workloadFactory.second->Acquire();
+    }
     m_IsWorkingMemAllocated = true;
 }
@@ -422,9 +431,10 @@ void LoadedNetwork::FreeWorkingMemory()
         return;
     }
     // Informs the memory managers to release memory in it's respective memory group
-    m_CpuRef.Release();
-    m_CpuAcc.Release();
-    m_GpuAcc.Release();
+    for (auto&& workloadFactory : m_WorkloadFactories)
+    {
+        workloadFactory.second->Release();
+    }
     m_IsWorkingMemAllocated = false;
 }
diff --git a/src/armnn/LoadedNetwork.hpp b/src/armnn/LoadedNetwork.hpp
index 3deb8bc2e2..51eb04f3df 100644
--- a/src/armnn/LoadedNetwork.hpp
+++ b/src/armnn/LoadedNetwork.hpp
@@ -11,13 +11,12 @@
 #include "LayerFwd.hpp"
 #include "Profiling.hpp"
 
-#include <backends/reference/RefWorkloadFactory.hpp>
-#include <backends/neon/NeonWorkloadFactory.hpp>
-#include <backends/cl/ClWorkloadFactory.hpp>
+#include <backends/IBackendInternal.hpp>
 #include <backends/Workload.hpp>
 #include <backends/WorkloadFactory.hpp>
 
 #include <mutex>
+#include <unordered_map>
 
 namespace cl
 {
@@ -62,9 +61,11 @@ private:
 
     const IWorkloadFactory& GetWorkloadFactory(const Layer& layer) const;
 
-    RefWorkloadFactory  m_CpuRef;
-    NeonWorkloadFactory m_CpuAcc;
-    ClWorkloadFactory   m_GpuAcc;
+    using BackendPtrMap = std::unordered_map<BackendId, IBackendInternalUniquePtr>;
+    using WorkloadFactoryMap = std::unordered_map<BackendId, IBackendInternal::IWorkloadFactoryPtr>;
+
+    BackendPtrMap       m_Backends;
+    WorkloadFactoryMap  m_WorkloadFactories;
 
     std::unique_ptr<OptimizedNetwork> m_OptimizedNetwork;
     WorkloadQueue                     m_InputQueue;
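A standalone sketch (not part of this change) of the create-on-first-use pattern the new LoadedNetwork constructor relies on: the first layer assigned to a given backend id triggers exactly one registry lookup, and the resulting backend and workload factory are cached in maps for every later layer. All names below (Backend, registry, layerBackends) are hypothetical stand-ins, not the real armnn types.

    // Minimal sketch of lazy backend/factory creation keyed by backend id.
    #include <functional>
    #include <iostream>
    #include <memory>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct Backend
    {
        explicit Backend(std::string id) : m_Id(std::move(id)) {}
        std::string m_Id;
    };

    using BackendPtr      = std::unique_ptr<Backend>;
    using FactoryFunction = std::function<BackendPtr()>;

    int main()
    {
        // Stand-in for BackendRegistryInstance(): maps an id to a factory function.
        std::unordered_map<std::string, FactoryFunction> registry;
        registry["CpuRef"] = []() { return std::make_unique<Backend>("CpuRef"); };
        registry["CpuAcc"] = []() { return std::make_unique<Backend>("CpuAcc"); };

        // Stand-in for m_Backends: populated lazily, at most once per id.
        std::unordered_map<std::string, BackendPtr> backends;

        // Layers reduced to the backend ids they were assigned during Optimize().
        std::vector<std::string> layerBackends = {"CpuRef", "CpuAcc", "CpuRef"};

        for (const auto& id : layerBackends)
        {
            if (backends.count(id) == 0)
            {
                auto createBackend = registry.at(id); // throws if id was never registered
                backends.emplace(id, createBackend());
                std::cout << "created backend " << id << "\n"; // printed once per id
            }
        }
    }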
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index f95e829cb9..cab5106959 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -211,7 +211,9 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
 
                 // Try preferred backend first
                 layer->SetBackendId(preferredBackend);
-                if (IWorkloadFactory::IsLayerSupported(*layer, boost::none, reasonIfUnsupported))
+                if (IWorkloadFactory::IsLayerSupported(*layer,
+                                                       EmptyOptional(),
+                                                       reasonIfUnsupported))
                 {
                     supportedBackendFound = true;
                 }
@@ -226,7 +228,9 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                     }
 
                     layer->SetBackendId(backend);
-                    if (IWorkloadFactory::IsLayerSupported(*layer, boost::none, reasonIfUnsupported))
+                    if (IWorkloadFactory::IsLayerSupported(*layer,
+                                                           EmptyOptional(),
+                                                           reasonIfUnsupported))
                     {
                         supportedBackendFound = true;
                         break;
diff --git a/src/armnnUtils/GraphTopologicalSort.hpp b/src/armnnUtils/GraphTopologicalSort.hpp
index 81a37ac4e5..11314590a0 100644
--- a/src/armnnUtils/GraphTopologicalSort.hpp
+++ b/src/armnnUtils/GraphTopologicalSort.hpp
@@ -4,8 +4,8 @@
 //
 #pragma once
 
+#include <armnn/Optional.hpp>
 #include <boost/assert.hpp>
-#include <boost/optional.hpp>
 
 #include <functional>
 #include <map>
@@ -27,7 +27,7 @@ enum class NodeState
 
 
 template <typename TNodeId>
-boost::optional<TNodeId> GetNextChild(TNodeId node,
+armnn::Optional<TNodeId> GetNextChild(TNodeId node,
                                       std::function<std::vector<TNodeId>(TNodeId)> getIncomingEdges,
                                       std::map<TNodeId, NodeState>& nodeStates)
 {
@@ -70,11 +70,11 @@ bool TopologicallySort(
 
         nodeStates[current] = NodeState::Visiting;
 
-        boost::optional<TNodeId> nextChildOfCurrent = GetNextChild(current, getIncomingEdges, nodeStates);
+        auto nextChildOfCurrent = GetNextChild(current, getIncomingEdges, nodeStates);
 
         if (nextChildOfCurrent)
         {
-            TNodeId nextChild = nextChildOfCurrent.get();
+            TNodeId nextChild = nextChildOfCurrent.value();
 
             // If the child has not been searched, add to the stack and iterate over this node
             if (nodeStates.find(nextChild) == nodeStates.end())
diff --git a/src/backends/BackendRegistry.hpp b/src/backends/BackendRegistry.hpp
index 23cb37da99..4465e95174 100644
--- a/src/backends/BackendRegistry.hpp
+++ b/src/backends/BackendRegistry.hpp
@@ -6,11 +6,12 @@
 
 #include <armnn/Types.hpp>
 #include "RegistryCommon.hpp"
+#include "IBackendInternal.hpp"
 
 namespace armnn
 {
 
-using BackendRegistry = RegistryCommon<IBackend, IBackendUniquePtr>;
+using BackendRegistry = RegistryCommon<IBackendInternal, IBackendInternalUniquePtr>;
 
 BackendRegistry& BackendRegistryInstance();
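A standalone sketch, not part of this change. armnn::Optional mirrors the std::optional interface, so the mechanical migration applied throughout this commit can be illustrated with std::optional as a close analogue: boost::none becomes EmptyOptional() (here std::nullopt), comparisons against boost::none become a boolean test, and .get() becomes .value().

    // Migration rules shown on a GetNextChild-style lookup.
    //   boost::none        -> EmptyOptional()   (here: std::nullopt)
    //   opt == boost::none -> !opt
    //   opt.get()          -> opt.value()
    #include <iostream>
    #include <map>
    #include <optional>

    std::optional<int> GetNextChild(int node, const std::map<int, int>& edges)
    {
        auto it = edges.find(node);
        if (it == edges.end())
        {
            return std::nullopt; // was: return boost::none;
        }
        return it->second;
    }

    int main()
    {
        std::map<int, int> edges = {{1, 2}, {2, 3}};

        auto next = GetNextChild(1, edges);
        if (next) // was: if (next != boost::none)
        {
            std::cout << "next child: " << next.value() << "\n"; // was: next.get()
        }
    }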
diff --git a/src/backends/IBackendInternal.hpp b/src/backends/IBackendInternal.hpp
index a441abd79f..7e44dbd676 100644
--- a/src/backends/IBackendInternal.hpp
+++ b/src/backends/IBackendInternal.hpp
@@ -5,19 +5,27 @@
 #pragma once
 
 #include <armnn/Types.hpp>
-#include <backends/WorkloadFactory.hpp>
 
 namespace armnn
 {
 
+class IWorkloadFactory;
+
 class IBackendInternal : public IBackend
 {
 protected:
+    // Creation must be done through a specific
+    // backend interface.
    IBackendInternal() = default;
-    ~IBackendInternal() override = default;
 
 public:
-    virtual std::unique_ptr<IWorkloadFactory> CreateWorkloadFactory() const = 0;
+    // Allow backends created by the factory function
+    // to be destroyed through IBackendInternal.
+    ~IBackendInternal() override = default;
+
+    using IWorkloadFactoryPtr = std::unique_ptr<IWorkloadFactory>;
+
+    virtual IWorkloadFactoryPtr CreateWorkloadFactory() const = 0;
 };
 
+using IBackendInternalUniquePtr = std::unique_ptr<IBackendInternal>;
+
 } // namespace armnn
diff --git a/src/backends/WorkloadFactory.cpp b/src/backends/WorkloadFactory.cpp
index e7dec49db4..fea383f030 100644
--- a/src/backends/WorkloadFactory.cpp
+++ b/src/backends/WorkloadFactory.cpp
@@ -5,10 +5,6 @@
 #include <backends/WorkloadFactory.hpp>
 #include <backends/LayerSupportRegistry.hpp>
 
-#include <backends/reference/RefWorkloadFactory.hpp>
-#include <backends/neon/NeonWorkloadFactory.hpp>
-#include <backends/cl/ClWorkloadFactory.hpp>
-
 #include <armnn/Types.hpp>
 #include <armnn/LayerSupport.hpp>
 #include <Layer.hpp>
@@ -24,40 +20,42 @@ namespace armnn
 namespace
 {
 
-    const TensorInfo OverrideDataType(const TensorInfo& info, boost::optional<DataType> type)
+
+const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
+{
+    if (!type)
     {
-        if (type == boost::none)
-        {
-            return info;
-        }
+        return info;
+    }
+
+    return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
+}
 
-        return TensorInfo(info.GetShape(), type.get(), info.GetQuantizationScale(), info.GetQuantizationOffset());
+Optional<DataType> GetBiasTypeFromWeightsType(Optional<DataType> weightsType)
+{
+    if (!weightsType)
+    {
+        return weightsType;
     }
 
-    boost::optional<DataType> GetBiasTypeFromWeightsType(boost::optional<DataType> weightsType)
+    switch(weightsType.value())
     {
-        if (weightsType == boost::none)
-        {
+        case DataType::Float16:
+        case DataType::Float32:
             return weightsType;
-        }
-
-        switch(weightsType.get())
-        {
-            case DataType::Float16:
-            case DataType::Float32:
-                return weightsType;
-            case DataType::QuantisedAsymm8:
-                return DataType::Signed32;
-            default:
-                BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
-        }
-        return boost::none;
+        case DataType::QuantisedAsymm8:
+            return DataType::Signed32;
+        default:
+            BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
     }
+    return EmptyOptional();
 }
 
+} // anonymous namespace
+
 bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                         const IConnectableLayer& connectableLayer,
-                                        boost::optional<DataType> dataType,
+                                        Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported)
 {
     Optional<std::string&> reason = outReasonIfUnsupported;
@@ -589,7 +587,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
 }
 
 bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
-                                        boost::optional<DataType> dataType,
+                                        Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported)
 {
     auto layer = boost::polymorphic_downcast<const Layer*>(&connectableLayer);
diff --git a/src/backends/WorkloadFactory.hpp b/src/backends/WorkloadFactory.hpp
index 41d6741ae7..2d482e0911 100644
--- a/src/backends/WorkloadFactory.hpp
+++ b/src/backends/WorkloadFactory.hpp
@@ -6,9 +6,9 @@
 
 #include <memory>
 #include <armnn/TensorFwd.hpp>
+#include <armnn/Optional.hpp>
 #include <backends/OutputHandler.hpp>
 #include <backends/Workload.hpp>
-#include <boost/optional.hpp>
 
 namespace armnn
 {
@@ -34,11 +34,11 @@ public:
 
     static bool IsLayerSupported(const BackendId& backendId,
                                  const IConnectableLayer& layer,
-                                 boost::optional<DataType> dataType,
+                                 Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
     static bool IsLayerSupported(const IConnectableLayer& layer,
-                                 boost::optional<DataType> dataType,
+                                 Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
     virtual bool SupportsSubTensors() const = 0;
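A standalone sketch, not part of this change. The reworked GetBiasTypeFromWeightsType helper encodes a common quantisation rule: float weights keep a bias of the same type, while QuantisedAsymm8 weights require Signed32 biases, since 8-bit multiply-accumulates are summed in a 32-bit accumulator. The enum and function below are local stand-ins (std::optional in place of armnn::Optional), not the armnn definitions.

    // Bias-type rule mirrored from GetBiasTypeFromWeightsType, using stand-in types.
    #include <cassert>
    #include <optional>

    enum class DataType { Float16, Float32, QuantisedAsymm8, Signed32 };

    std::optional<DataType> GetBiasType(std::optional<DataType> weightsType)
    {
        if (!weightsType)
        {
            return weightsType; // unknown weights -> unknown bias
        }
        switch (weightsType.value())
        {
            case DataType::Float16:
            case DataType::Float32:
                return weightsType;        // bias matches float weight type
            case DataType::QuantisedAsymm8:
                return DataType::Signed32; // 8-bit weights -> 32-bit integer bias
            default:
                return std::nullopt;       // unsupported weight type
        }
    }

    int main()
    {
        assert(GetBiasType(DataType::Float32) == DataType::Float32);
        assert(GetBiasType(DataType::QuantisedAsymm8) == DataType::Signed32);
        assert(!GetBiasType(std::nullopt).has_value());
    }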
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index 1bab96b49a..d6a3a89391 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -23,7 +23,7 @@ static StaticRegistryInitializer<BackendRegistry> g_RegisterHelper
     ClBackend::GetIdStatic(),
     []()
     {
-        return IBackendUniquePtr(new ClBackend, &ClBackend::Destroy);
+        return IBackendInternalUniquePtr(new ClBackend);
     }
 };
 
@@ -35,14 +35,9 @@ const BackendId& ClBackend::GetIdStatic()
     return s_Id;
 }
 
-std::unique_ptr<IWorkloadFactory> ClBackend::CreateWorkloadFactory() const
+IBackendInternal::IWorkloadFactoryPtr ClBackend::CreateWorkloadFactory() const
 {
     return std::make_unique<ClWorkloadFactory>();
 }
 
-void ClBackend::Destroy(IBackend* backend)
-{
-    delete boost::polymorphic_downcast<ClBackend*>(backend);
-}
-
 } // namespace armnn
\ No newline at end of file
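A standalone sketch, not part of this change. Because IBackendInternal now has a public virtual destructor, the registry can hold plain std::unique_ptr values with the default deleter, which is why each backend's static Destroy function and the two-argument IBackendUniquePtr constructor disappear above. The static-registration idiom itself is unchanged and reduces to the following; Registry, MyBackend, and the helper names are hypothetical stand-ins for BackendRegistry, a concrete backend, and StaticRegistryInitializer.

    // Reduced sketch of the static-registration idiom used by Cl/Neon/RefBackend.
    #include <functional>
    #include <iostream>
    #include <memory>
    #include <string>
    #include <unordered_map>

    struct IBackend
    {
        virtual ~IBackend() = default; // virtual dtor: no custom deleter needed
        virtual std::string GetId() const = 0;
    };
    using IBackendUniquePtr = std::unique_ptr<IBackend>;

    struct Registry
    {
        using Factory = std::function<IBackendUniquePtr()>;

        void Register(const std::string& id, Factory f) { m_Factories[id] = std::move(f); }
        Factory GetFactory(const std::string& id) const { return m_Factories.at(id); }

        std::unordered_map<std::string, Factory> m_Factories;
    };

    Registry& RegistryInstance()
    {
        static Registry instance; // one process-wide registry
        return instance;
    }

    // Stand-in for StaticRegistryInitializer: registers from a static
    // initializer, i.e. before main() runs.
    struct StaticRegistryInitializer
    {
        StaticRegistryInitializer(const std::string& id, Registry::Factory f)
        {
            RegistryInstance().Register(id, std::move(f));
        }
    };

    struct MyBackend : IBackend
    {
        std::string GetId() const override { return "MyBackend"; }
    };

    static StaticRegistryInitializer g_Register
    {
        "MyBackend",
        []() { return IBackendUniquePtr(new MyBackend); }
    };

    int main()
    {
        auto backend = RegistryInstance().GetFactory("MyBackend")();
        std::cout << backend->GetId() << "\n"; // prints: MyBackend
    }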
diff --git a/src/backends/cl/ClBackend.hpp b/src/backends/cl/ClBackend.hpp
index 49a7a466c4..4eae6c92ec 100644
--- a/src/backends/cl/ClBackend.hpp
+++ b/src/backends/cl/ClBackend.hpp
@@ -18,9 +18,7 @@ public:
     static const BackendId& GetIdStatic();
     const BackendId& GetId() const override { return GetIdStatic(); }
 
-    std::unique_ptr<IWorkloadFactory> CreateWorkloadFactory() const override;
-
-    static void Destroy(IBackend* backend);
+    IWorkloadFactoryPtr CreateWorkloadFactory() const override;
 };
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index e1d8314d82..c697d90950 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -35,7 +35,7 @@ namespace armnn
 {
 
 bool ClWorkloadFactory::IsLayerSupported(const Layer& layer,
-                                         boost::optional<DataType> dataType,
+                                         Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported)
 {
     return IWorkloadFactory::IsLayerSupported(Compute::GpuAcc, layer, dataType, outReasonIfUnsupported);
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 66de3a50f1..1441b71e61 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -5,12 +5,11 @@
 #pragma once
 
 #include <armnn/IRuntime.hpp>
+#include <armnn/Optional.hpp>
 
 #include <backends/OutputHandler.hpp>
 #include <backends/aclCommon/memory/BaseMemoryManager.hpp>
 
-#include <boost/optional.hpp>
-
 namespace armnn
 {
 
@@ -22,7 +21,8 @@ public:
 
     virtual Compute GetCompute() const override { return Compute::GpuAcc; }
 
-    static bool IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+    static bool IsLayerSupported(const Layer& layer,
+                                 Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
     virtual bool SupportsSubTensors() const override { return true; }
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index b7102956df..e475f0232b 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -23,7 +23,7 @@ static StaticRegistryInitializer<BackendRegistry> g_RegisterHelper
     NeonBackend::GetIdStatic(),
     []()
     {
-        return IBackendUniquePtr(new NeonBackend, &NeonBackend::Destroy);
+        return IBackendInternalUniquePtr(new NeonBackend);
     }
 };
 
@@ -35,14 +35,9 @@ const BackendId& NeonBackend::GetIdStatic()
     return s_Id;
 }
 
-std::unique_ptr<IWorkloadFactory> NeonBackend::CreateWorkloadFactory() const
+IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory() const
 {
     return std::make_unique<NeonWorkloadFactory>();
 }
 
-void NeonBackend::Destroy(IBackend* backend)
-{
-    delete boost::polymorphic_downcast<NeonBackend*>(backend);
-}
-
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/NeonBackend.hpp b/src/backends/neon/NeonBackend.hpp
index 6280610bda..e1287c784c 100644
--- a/src/backends/neon/NeonBackend.hpp
+++ b/src/backends/neon/NeonBackend.hpp
@@ -18,9 +18,7 @@ public:
     static const BackendId& GetIdStatic();
     const BackendId& GetId() const override { return GetIdStatic(); }
 
-    std::unique_ptr<IWorkloadFactory> CreateWorkloadFactory() const override;
-
-    static void Destroy(IBackend* backend);
+    IWorkloadFactoryPtr CreateWorkloadFactory() const override;
 };
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 0e069a2f64..f0a9e76de1 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -25,7 +25,8 @@
 namespace armnn
 {
 
-bool NeonWorkloadFactory::IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+bool NeonWorkloadFactory::IsLayerSupported(const Layer& layer,
+                                           Optional<DataType> dataType,
                                            std::string& outReasonIfUnsupported)
 {
     return IWorkloadFactory::IsLayerSupported(Compute::CpuAcc, layer, dataType, outReasonIfUnsupported);
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 64951612c1..d1dd2c85fe 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -4,11 +4,12 @@
 //
 #pragma once
 
+#include <armnn/Optional.hpp>
 #include <backends/OutputHandler.hpp>
 #include <backends/aclCommon/memory/BaseMemoryManager.hpp>
 
 #include <boost/core/ignore_unused.hpp>
-#include <boost/optional.hpp>
+
 
 namespace armnn
 {
@@ -21,7 +22,8 @@ public:
 
     virtual Compute GetCompute() const override { return Compute::CpuAcc; }
 
-    static bool IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+    static bool IsLayerSupported(const Layer& layer,
+                                 Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
     virtual bool SupportsSubTensors() const override { return true; }
diff --git a/src/backends/reference/RefBackend.cpp b/src/backends/reference/RefBackend.cpp
index 9afb42d59f..34348fa120 100644
--- a/src/backends/reference/RefBackend.cpp
+++ b/src/backends/reference/RefBackend.cpp
@@ -23,7 +23,7 @@ static StaticRegistryInitializer<BackendRegistry> g_RegisterHelper
     RefBackend::GetIdStatic(),
     []()
     {
-        return IBackendUniquePtr(new RefBackend, &RefBackend::Destroy);
+        return IBackendInternalUniquePtr(new RefBackend);
     }
 };
 
@@ -35,14 +35,9 @@ const BackendId& RefBackend::GetIdStatic()
     return s_Id;
 }
 
-std::unique_ptr<IWorkloadFactory> RefBackend::CreateWorkloadFactory() const
+IBackendInternal::IWorkloadFactoryPtr RefBackend::CreateWorkloadFactory() const
 {
     return std::make_unique<RefWorkloadFactory>();
 }
 
-void RefBackend::Destroy(IBackend* backend)
-{
-    delete boost::polymorphic_downcast<RefBackend*>(backend);
-}
-
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/RefBackend.hpp b/src/backends/reference/RefBackend.hpp
index 0cd3cf4dce..7162c9bf40 100644
--- a/src/backends/reference/RefBackend.hpp
+++ b/src/backends/reference/RefBackend.hpp
@@ -18,9 +18,7 @@ public:
     static const BackendId& GetIdStatic();
     const BackendId& GetId() const override { return GetIdStatic(); }
 
-    std::unique_ptr<IWorkloadFactory> CreateWorkloadFactory() const override;
-
-    static void Destroy(IBackend* backend);
+    IWorkloadFactoryPtr CreateWorkloadFactory() const override;
 };
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 048f6cdcc4..783e5fba2e 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -25,7 +25,8 @@ RefWorkloadFactory::RefWorkloadFactory()
 {
 }
 
-bool RefWorkloadFactory::IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+bool RefWorkloadFactory::IsLayerSupported(const Layer& layer,
+                                          Optional<DataType> dataType,
                                           std::string& outReasonIfUnsupported)
 {
     return IWorkloadFactory::IsLayerSupported(Compute::CpuRef, layer, dataType, outReasonIfUnsupported);
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 1a9227a978..ef2e1abfaa 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -4,11 +4,12 @@
 //
 #pragma once
 
+#include <armnn/Optional.hpp>
 #include <backends/WorkloadFactory.hpp>
 #include <backends/OutputHandler.hpp>
 
 #include <boost/core/ignore_unused.hpp>
-#include <boost/optional.hpp>
+
 
 namespace armnn
 {
@@ -34,7 +35,8 @@ public:
 
     virtual Compute GetCompute() const override { return Compute::CpuRef; }
 
-    static bool IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+    static bool IsLayerSupported(const Layer& layer,
+                                 Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
     virtual bool SupportsSubTensors() const override { return false; }
diff --git a/src/backends/test/BackendRegistryTests.cpp b/src/backends/test/BackendRegistryTests.cpp
index f6f749936f..34a2706466 100644
--- a/src/backends/test/BackendRegistryTests.cpp
+++ b/src/backends/test/BackendRegistryTests.cpp
@@ -55,7 +55,7 @@ BOOST_AUTO_TEST_CASE(TestRegistryHelper)
         [&called]()
         {
             called = true;
-            return armnn::IBackendUniquePtr(nullptr, nullptr);
+            return armnn::IBackendInternalUniquePtr(nullptr);
         }
     );
 
@@ -82,7 +82,7 @@ BOOST_AUTO_TEST_CASE(TestDirectCallToRegistry)
         [&called]()
        {
            called = true;
-           return armnn::IBackendUniquePtr(nullptr, nullptr);
+           return armnn::IBackendInternalUniquePtr(nullptr);
        }
    );