From 29c75de868ac3a86a70b25f8da0d0c7e47d40803 Mon Sep 17 00:00:00 2001
From: David Beck
Date: Tue, 23 Oct 2018 13:35:58 +0100
Subject: IVGCVSW-2067 : dynamically create workload factories based on the
 backends in the network

Change-Id: Ide594db8c79ff67642721d8bad47624b88621fbd
---
 src/armnn/LoadedNetwork.cpp                   | 58 ++++++++++++++++-----------
 src/armnn/LoadedNetwork.hpp                   | 13 +++---
 src/armnn/Network.cpp                         |  8 +++-
 src/armnnUtils/GraphTopologicalSort.hpp       |  8 ++--
 src/backends/BackendRegistry.hpp              |  3 +-
 src/backends/IBackendInternal.hpp             | 14 +++++--
 src/backends/WorkloadFactory.cpp              | 54 ++++++++++++-------------
 src/backends/WorkloadFactory.hpp              |  6 +--
 src/backends/cl/ClBackend.cpp                 |  9 +----
 src/backends/cl/ClBackend.hpp                 |  4 +-
 src/backends/cl/ClWorkloadFactory.cpp         |  2 +-
 src/backends/cl/ClWorkloadFactory.hpp         |  6 +--
 src/backends/neon/NeonBackend.cpp             |  9 +----
 src/backends/neon/NeonBackend.hpp             |  4 +-
 src/backends/neon/NeonWorkloadFactory.cpp     |  3 +-
 src/backends/neon/NeonWorkloadFactory.hpp     |  6 ++-
 src/backends/reference/RefBackend.cpp         |  9 +----
 src/backends/reference/RefBackend.hpp         |  4 +-
 src/backends/reference/RefWorkloadFactory.cpp |  3 +-
 src/backends/reference/RefWorkloadFactory.hpp |  6 ++-
 src/backends/test/BackendRegistryTests.cpp    |  4 +-
 tests/CMakeLists.txt                          | 12 +++---
 tests/InferenceModel.hpp                      |  2 +-
 23 files changed, 127 insertions(+), 120 deletions(-)

diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 7aa66d9b09..40137779f6 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -12,6 +12,7 @@
 #include "HeapProfiling.hpp"
 
 #include
+#include
 #include
 #include
 
@@ -70,8 +71,7 @@ std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<
 }
 
 LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net)
-    : m_CpuRef()
-    , m_OptimizedNetwork(std::move(net))
+    : m_OptimizedNetwork(std::move(net))
     , m_WorkingMemLock(m_WorkingMemMutex, std::defer_lock)
 {
     // Create a profiler and register it for the current thread.
@@ -79,12 +79,20 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net)
     ProfilerManager::GetInstance().RegisterProfiler(m_Profiler.get());
 
     Graph& order = m_OptimizedNetwork->GetGraph().TopologicalSort();
-    //First create tensor handlers.
+    //First create tensor handlers, backends and workload factories.
     //Handlers are created before workloads are.
     //Because workload creation can modify some of the handlers,
     //(for example the splitter and merger layers).
     for (auto&& layer : order)
     {
+        auto const& backend = layer->GetBackendId();
+        if (m_Backends.count(backend) == 0)
+        {
+            auto createBackend = BackendRegistryInstance().GetFactory(backend);
+            auto it = m_Backends.emplace(std::make_pair(backend, createBackend()));
+            m_WorkloadFactories.emplace(std::make_pair(backend,
+                                                       it.first->second->CreateWorkloadFactory()));
+        }
         layer->CreateTensorHandles(m_OptimizedNetwork->GetGraph(), GetWorkloadFactory(*layer));
     }
 
@@ -126,9 +134,10 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net)
     m_OptimizedNetwork->GetGraph().AllocateDynamicBuffers();
 
     // Finalize the workload factories before execution.
-    m_CpuRef.Finalize();
-    m_CpuAcc.Finalize();
-    m_GpuAcc.Finalize();
+    for (auto&& workloadFactory : m_WorkloadFactories)
+    {
+        workloadFactory.second->Finalize();
+    }
 }
 
 TensorInfo LoadedNetwork::GetInputTensorInfo(LayerBindingId layerId) const
@@ -164,26 +173,25 @@ const IWorkloadFactory& LoadedNetwork::GetWorkloadFactory(const Layer& layer) co
 {
     const IWorkloadFactory* workloadFactory = nullptr;
 
-    if (layer.GetBackendId() == Compute::CpuAcc)
-    {
-        workloadFactory = &m_CpuAcc;
-    }
-    else if (layer.GetBackendId() == Compute::GpuAcc)
-    {
-        workloadFactory = &m_GpuAcc;
-    }
-    else if (layer.GetBackendId() == Compute::CpuRef)
+    auto it = m_WorkloadFactories.find(layer.GetBackendId());
+    if (it == m_WorkloadFactories.end())
     {
-        workloadFactory = &m_CpuRef;
+        throw RuntimeException(
+            boost::str(
+                boost::format("No workload factory for %1% to be used for layer: %2%")
+                % layer.GetBackendId().Get()
+                % layer.GetNameStr()),
+            CHECK_LOCATION());
     }
 
+    workloadFactory = it->second.get();
+
     BOOST_ASSERT_MSG(workloadFactory, "No workload factory");
 
     std::string reasonIfUnsupported;
     BOOST_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer, {}, reasonIfUnsupported),
-        "Factory does not support layer");
+                     "Factory does not support layer");
     boost::ignore_unused(reasonIfUnsupported);
-
     return *workloadFactory;
 }
 
@@ -408,9 +416,10 @@ void LoadedNetwork::AllocateWorkingMemory()
     {
         return;
     }
-    m_CpuRef.Acquire();
-    m_CpuAcc.Acquire();
-    m_GpuAcc.Acquire();
+    for (auto&& workloadFactory : m_WorkloadFactories)
+    {
+        workloadFactory.second->Acquire();
+    }
     m_IsWorkingMemAllocated = true;
 }
 
@@ -422,9 +431,10 @@ void LoadedNetwork::FreeWorkingMemory()
         return;
     }
     // Informs the memory managers to release memory in it's respective memory group
-    m_CpuRef.Release();
-    m_CpuAcc.Release();
-    m_GpuAcc.Release();
+    for (auto&& workloadFactory : m_WorkloadFactories)
+    {
+        workloadFactory.second->Release();
+    }
     m_IsWorkingMemAllocated = false;
 }
 
diff --git a/src/armnn/LoadedNetwork.hpp b/src/armnn/LoadedNetwork.hpp
index 3deb8bc2e2..51eb04f3df 100644
--- a/src/armnn/LoadedNetwork.hpp
+++ b/src/armnn/LoadedNetwork.hpp
@@ -11,13 +11,12 @@
 #include "LayerFwd.hpp"
 #include "Profiling.hpp"
 
-#include
-#include
-#include
+#include
 #include
 #include
 #include
+#include
 
 namespace cl
 {
@@ -62,9 +61,11 @@ private:
 
     const IWorkloadFactory& GetWorkloadFactory(const Layer& layer) const;
 
-    RefWorkloadFactory m_CpuRef;
-    NeonWorkloadFactory m_CpuAcc;
-    ClWorkloadFactory m_GpuAcc;
+    using BackendPtrMap = std::unordered_map<BackendId, IBackendInternalUniquePtr>;
+    using WorkloadFactoryMap = std::unordered_map<BackendId, IBackendInternal::IWorkloadFactoryPtr>;
+
+    BackendPtrMap m_Backends;
+    WorkloadFactoryMap m_WorkloadFactories;
 
     std::unique_ptr<OptimizedNetwork> m_OptimizedNetwork;
     WorkloadQueue m_InputQueue;
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index f95e829cb9..cab5106959 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -211,7 +211,9 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
 
         // Try preferred backend first
         layer->SetBackendId(preferredBackend);
-        if (IWorkloadFactory::IsLayerSupported(*layer, boost::none, reasonIfUnsupported))
+        if (IWorkloadFactory::IsLayerSupported(*layer,
+                                               EmptyOptional(),
+                                               reasonIfUnsupported))
         {
             supportedBackendFound = true;
         }
@@ -226,7 +228,9 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                 }
 
                 layer->SetBackendId(backend);
-                if (IWorkloadFactory::IsLayerSupported(*layer, boost::none, reasonIfUnsupported))
+                if (IWorkloadFactory::IsLayerSupported(*layer,
+                                                       EmptyOptional(),
+                                                       reasonIfUnsupported))
                 {
                     supportedBackendFound = true;
                     break;
                 }
             }
diff --git a/src/armnnUtils/GraphTopologicalSort.hpp b/src/armnnUtils/GraphTopologicalSort.hpp
index 81a37ac4e5..11314590a0 100644
--- a/src/armnnUtils/GraphTopologicalSort.hpp
+++ b/src/armnnUtils/GraphTopologicalSort.hpp
@@ -4,8 +4,8 @@
 //
 #pragma once
 
+#include
 #include
-#include
 
 #include
 #include
@@ -27,7 +27,7 @@ enum class NodeState
 
 
 template <typename TNodeId>
-boost::optional<TNodeId> GetNextChild(TNodeId node,
+armnn::Optional<TNodeId> GetNextChild(TNodeId node,
                                       std::function<std::vector<TNodeId>(TNodeId)> getIncomingEdges,
                                       std::map<TNodeId, NodeState>& nodeStates)
 {
@@ -70,11 +70,11 @@ bool TopologicallySort(
 
             nodeStates[current] = NodeState::Visiting;
 
-            boost::optional<TNodeId> nextChildOfCurrent = GetNextChild(current, getIncomingEdges, nodeStates);
+            auto nextChildOfCurrent = GetNextChild(current, getIncomingEdges, nodeStates);
 
             if (nextChildOfCurrent)
             {
-                TNodeId nextChild = nextChildOfCurrent.get();
+                TNodeId nextChild = nextChildOfCurrent.value();
 
                 // If the child has not been searched, add to the stack and iterate over this node
                 if (nodeStates.find(nextChild) == nodeStates.end())
diff --git a/src/backends/BackendRegistry.hpp b/src/backends/BackendRegistry.hpp
index 23cb37da99..4465e95174 100644
--- a/src/backends/BackendRegistry.hpp
+++ b/src/backends/BackendRegistry.hpp
@@ -6,11 +6,12 @@
 
 #include
 #include "RegistryCommon.hpp"
+#include "IBackendInternal.hpp"
 
 namespace armnn
 {
 
-using BackendRegistry = RegistryCommon<IBackend, IBackendUniquePtr>;
+using BackendRegistry = RegistryCommon<IBackendInternal, IBackendInternalUniquePtr>;
 
 BackendRegistry& BackendRegistryInstance();
diff --git a/src/backends/IBackendInternal.hpp b/src/backends/IBackendInternal.hpp
index a441abd79f..7e44dbd676 100644
--- a/src/backends/IBackendInternal.hpp
+++ b/src/backends/IBackendInternal.hpp
@@ -5,19 +5,27 @@
 #pragma once
 
 #include
-#include
 
 namespace armnn
 {
+class IWorkloadFactory;
 
 class IBackendInternal : public IBackend
 {
protected:
+    // Creation must be done through a specific
+    // backend interface.
     IBackendInternal() = default;
-    ~IBackendInternal() override = default;
 
 public:
-    virtual std::unique_ptr<IWorkloadFactory> CreateWorkloadFactory() const = 0;
+    // Allow backends created by the factory function
+    // to be destroyed through IBackendInternal.
+    ~IBackendInternal() override = default;
+
+    using IWorkloadFactoryPtr = std::unique_ptr<IWorkloadFactory>;
+    virtual IWorkloadFactoryPtr CreateWorkloadFactory() const = 0;
 };
 
+using IBackendInternalUniquePtr = std::unique_ptr<IBackendInternal>;
+
 } // namespace armnn
diff --git a/src/backends/WorkloadFactory.cpp b/src/backends/WorkloadFactory.cpp
index e7dec49db4..fea383f030 100644
--- a/src/backends/WorkloadFactory.cpp
+++ b/src/backends/WorkloadFactory.cpp
@@ -5,10 +5,6 @@
 #include
 #include
 
-#include
-#include
-#include
-
 #include
 #include
 #include
@@ -24,40 +20,42 @@ namespace armnn
 namespace
 {
-    const TensorInfo OverrideDataType(const TensorInfo& info, boost::optional<DataType> type)
+
+const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
+{
+    if (!type)
     {
-        if (type == boost::none)
-        {
-            return info;
-        }
+        return info;
+    }
+
+    return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
+}
 
-        return TensorInfo(info.GetShape(), type.get(), info.GetQuantizationScale(), info.GetQuantizationOffset());
+Optional<DataType> GetBiasTypeFromWeightsType(Optional<DataType> weightsType)
+{
+    if (!weightsType)
+    {
+        return weightsType;
     }
 
-    boost::optional<DataType> GetBiasTypeFromWeightsType(boost::optional<DataType> weightsType)
+    switch(weightsType.value())
     {
-        if (weightsType == boost::none)
-        {
+        case DataType::Float16:
+        case DataType::Float32:
             return weightsType;
-        }
-
-        switch(weightsType.get())
-        {
-            case DataType::Float16:
-            case DataType::Float32:
-                return weightsType;
-            case DataType::QuantisedAsymm8:
-                return DataType::Signed32;
-            default:
-                BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
-        }
-        return boost::none;
+        case DataType::QuantisedAsymm8:
+            return DataType::Signed32;
+        default:
+            BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
     }
+    return EmptyOptional();
 }
 
+} // anonymous namespace
+
 bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                         const IConnectableLayer& connectableLayer,
-                                        boost::optional<DataType> dataType,
+                                        Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported)
 {
     Optional<std::string&> reason = outReasonIfUnsupported;
@@ -589,7 +587,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
 }
 
 bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
-                                        boost::optional<DataType> dataType,
+                                        Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported)
 {
     auto layer = boost::polymorphic_downcast<const Layer*>(&connectableLayer);
diff --git a/src/backends/WorkloadFactory.hpp b/src/backends/WorkloadFactory.hpp
index 41d6741ae7..2d482e0911 100644
--- a/src/backends/WorkloadFactory.hpp
+++ b/src/backends/WorkloadFactory.hpp
@@ -6,9 +6,9 @@
 
 #include
 #include
+#include
 #include
 #include
-#include
 
 namespace armnn
 {
@@ -34,11 +34,11 @@ public:
 
     static bool IsLayerSupported(const BackendId& backendId,
                                  const IConnectableLayer& layer,
-                                 boost::optional<DataType> dataType,
+                                 Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
     static bool IsLayerSupported(const IConnectableLayer& layer,
-                                 boost::optional<DataType> dataType,
+                                 Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
     virtual bool SupportsSubTensors() const = 0;
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index 1bab96b49a..d6a3a89391 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -23,7 +23,7 @@ static StaticRegistryInitializer<BackendRegistry> g_RegisterHelper
     ClBackend::GetIdStatic(),
     []()
     {
-        return IBackendUniquePtr(new ClBackend, &ClBackend::Destroy);
+        return IBackendInternalUniquePtr(new ClBackend);
     }
 };
 
@@ -35,14 +35,9 @@ const BackendId& ClBackend::GetIdStatic()
     return s_Id;
 }
 
-std::unique_ptr<IWorkloadFactory> ClBackend::CreateWorkloadFactory() const
+IBackendInternal::IWorkloadFactoryPtr ClBackend::CreateWorkloadFactory() const
 {
     return std::make_unique<ClWorkloadFactory>();
 }
 
-void ClBackend::Destroy(IBackend* backend)
-{
-    delete boost::polymorphic_downcast<ClBackend*>(backend);
-}
-
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/cl/ClBackend.hpp b/src/backends/cl/ClBackend.hpp
index 49a7a466c4..4eae6c92ec 100644
--- a/src/backends/cl/ClBackend.hpp
+++ b/src/backends/cl/ClBackend.hpp
@@ -18,9 +18,7 @@ public:
     static const BackendId& GetIdStatic();
     const BackendId& GetId() const override { return GetIdStatic(); }
 
-    std::unique_ptr<IWorkloadFactory> CreateWorkloadFactory() const override;
-
-    static void Destroy(IBackend* backend);
+    IWorkloadFactoryPtr CreateWorkloadFactory() const override;
 };
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index e1d8314d82..c697d90950 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -35,7 +35,7 @@ namespace armnn
 {
 
 bool ClWorkloadFactory::IsLayerSupported(const Layer& layer,
-                                         boost::optional<DataType> dataType,
+                                         Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported)
 {
     return IWorkloadFactory::IsLayerSupported(Compute::GpuAcc, layer, dataType, outReasonIfUnsupported);
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 66de3a50f1..1441b71e61 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -5,12 +5,11 @@
 #pragma once
 
 #include
+#include
 
 #include
 #include
 
-#include
-
 namespace armnn
 {
@@ -22,7 +21,8 @@ public:
 
     virtual Compute GetCompute() const override { return Compute::GpuAcc; }
 
-    static bool IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+    static bool IsLayerSupported(const Layer& layer,
+                                 Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
     virtual bool SupportsSubTensors() const override { return true; }
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index b7102956df..e475f0232b 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -23,7 +23,7 @@ static StaticRegistryInitializer<BackendRegistry> g_RegisterHelper
     NeonBackend::GetIdStatic(),
     []()
     {
-        return IBackendUniquePtr(new NeonBackend, &NeonBackend::Destroy);
+        return IBackendInternalUniquePtr(new NeonBackend);
     }
 };
 
@@ -35,14 +35,9 @@ const BackendId& NeonBackend::GetIdStatic()
     return s_Id;
 }
 
-std::unique_ptr<IWorkloadFactory> NeonBackend::CreateWorkloadFactory() const
+IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory() const
 {
     return std::make_unique<NeonWorkloadFactory>();
 }
 
-void NeonBackend::Destroy(IBackend* backend)
-{
-    delete boost::polymorphic_downcast<NeonBackend*>(backend);
-}
-
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/NeonBackend.hpp b/src/backends/neon/NeonBackend.hpp
index 6280610bda..e1287c784c 100644
--- a/src/backends/neon/NeonBackend.hpp
+++ b/src/backends/neon/NeonBackend.hpp
@@ -18,9 +18,7 @@ public:
     static const BackendId& GetIdStatic();
     const BackendId& GetId() const override { return GetIdStatic(); }
 
-    std::unique_ptr<IWorkloadFactory> CreateWorkloadFactory() const override;
-
-    static void Destroy(IBackend* backend);
+    IWorkloadFactoryPtr CreateWorkloadFactory() const override;
 };
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 0e069a2f64..f0a9e76de1 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -25,7 +25,8 @@ namespace armnn
 {
 
-bool NeonWorkloadFactory::IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+bool NeonWorkloadFactory::IsLayerSupported(const Layer& layer,
+                                           Optional<DataType> dataType,
                                            std::string& outReasonIfUnsupported)
 {
     return IWorkloadFactory::IsLayerSupported(Compute::CpuAcc, layer, dataType, outReasonIfUnsupported);
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 64951612c1..d1dd2c85fe 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -4,11 +4,12 @@
 //
 #pragma once
 
+#include
 #include
 #include
 #include
-#include
+
 
 namespace armnn
 {
@@ -21,7 +22,8 @@ public:
 
     virtual Compute GetCompute() const override { return Compute::CpuAcc; }
 
-    static bool IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+    static bool IsLayerSupported(const Layer& layer,
+                                 Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
     virtual bool SupportsSubTensors() const override { return true; }
diff --git a/src/backends/reference/RefBackend.cpp b/src/backends/reference/RefBackend.cpp
index 9afb42d59f..34348fa120 100644
--- a/src/backends/reference/RefBackend.cpp
+++ b/src/backends/reference/RefBackend.cpp
@@ -23,7 +23,7 @@ static StaticRegistryInitializer<BackendRegistry> g_RegisterHelper
     RefBackend::GetIdStatic(),
     []()
     {
-        return IBackendUniquePtr(new RefBackend, &RefBackend::Destroy);
+        return IBackendInternalUniquePtr(new RefBackend);
     }
 };
 
@@ -35,14 +35,9 @@ const BackendId& RefBackend::GetIdStatic()
     return s_Id;
 }
 
-std::unique_ptr<IWorkloadFactory> RefBackend::CreateWorkloadFactory() const
+IBackendInternal::IWorkloadFactoryPtr RefBackend::CreateWorkloadFactory() const
 {
     return std::make_unique<RefWorkloadFactory>();
 }
 
-void RefBackend::Destroy(IBackend* backend)
-{
-    delete boost::polymorphic_downcast<RefBackend*>(backend);
-}
-
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/RefBackend.hpp b/src/backends/reference/RefBackend.hpp
index 0cd3cf4dce..7162c9bf40 100644
--- a/src/backends/reference/RefBackend.hpp
+++ b/src/backends/reference/RefBackend.hpp
@@ -18,9 +18,7 @@ public:
     static const BackendId& GetIdStatic();
     const BackendId& GetId() const override { return GetIdStatic(); }
 
-    std::unique_ptr<IWorkloadFactory> CreateWorkloadFactory() const override;
-
-    static void Destroy(IBackend* backend);
+    IWorkloadFactoryPtr CreateWorkloadFactory() const override;
 };
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 048f6cdcc4..783e5fba2e 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -25,7 +25,8 @@ RefWorkloadFactory::RefWorkloadFactory()
 {
 }
 
-bool RefWorkloadFactory::IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+bool RefWorkloadFactory::IsLayerSupported(const Layer& layer,
+                                          Optional<DataType> dataType,
                                           std::string& outReasonIfUnsupported)
 {
     return IWorkloadFactory::IsLayerSupported(Compute::CpuRef, layer, dataType, outReasonIfUnsupported);
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 1a9227a978..ef2e1abfaa 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -4,11 +4,12 @@
 //
 #pragma once
 
+#include
 #include
 #include
 #include
-#include
+
 
 namespace armnn
 {
@@ -34,7 +35,8 @@ public:
 
     virtual Compute GetCompute() const override { return Compute::CpuRef; }
 
-    static bool IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
+    static bool IsLayerSupported(const Layer& layer,
+                                 Optional<DataType> dataType,
                                  std::string& outReasonIfUnsupported);
 
     virtual bool SupportsSubTensors() const override { return false; }
diff --git a/src/backends/test/BackendRegistryTests.cpp b/src/backends/test/BackendRegistryTests.cpp
index f6f749936f..34a2706466 100644
--- a/src/backends/test/BackendRegistryTests.cpp
+++ b/src/backends/test/BackendRegistryTests.cpp
@@ -55,7 +55,7 @@ BOOST_AUTO_TEST_CASE(TestRegistryHelper)
         [&called]()
         {
             called = true;
-            return armnn::IBackendUniquePtr(nullptr, nullptr);
+            return armnn::IBackendInternalUniquePtr(nullptr);
         }
     );
 
@@ -82,7 +82,7 @@ BOOST_AUTO_TEST_CASE(TestDirectCallToRegistry)
         [&called]()
         {
             called = true;
-            return armnn::IBackendUniquePtr(nullptr, nullptr);
+            return armnn::IBackendInternalUniquePtr(nullptr);
         }
     );
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 50e24c9042..97f21154fe 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -14,13 +14,13 @@ set(inference_test_sources
     InferenceTestImage.cpp)
 add_library_ex(inferenceTest STATIC ${inference_test_sources})
 target_include_directories(inferenceTest PRIVATE ../src/armnnUtils)
-target_include_directories(inferenceTest PRIVATE ../src/backends)
+target_include_directories(inferenceTest PRIVATE ../src)
 
 if(BUILD_CAFFE_PARSER)
     macro(CaffeParserTest testName sources)
         add_executable_ex(${testName} ${sources})
         target_include_directories(${testName} PRIVATE ../src/armnnUtils)
-        target_include_directories(${testName} PRIVATE ../src/backends)
+        target_include_directories(${testName} PRIVATE ../src)
 
         set_target_properties(${testName} PROPERTIES COMPILE_FLAGS "${CAFFE_PARSER_TEST_ADDITIONAL_COMPILE_FLAGS}")
 
         target_link_libraries(${testName} inferenceTest)
@@ -91,7 +91,7 @@ if(BUILD_TF_PARSER)
     macro(TfParserTest testName sources)
         add_executable_ex(${testName} ${sources})
         target_include_directories(${testName} PRIVATE ../src/armnnUtils)
-        target_include_directories(${testName} PRIVATE ../src/backends)
+        target_include_directories(${testName} PRIVATE ../src)
 
         target_link_libraries(${testName} inferenceTest)
         target_link_libraries(${testName} armnnTfParser)
@@ -142,7 +142,7 @@ if (BUILD_TF_LITE_PARSER)
     macro(TfLiteParserTest testName sources)
         add_executable_ex(${testName} ${sources})
         target_include_directories(${testName} PRIVATE ../src/armnnUtils)
-        target_include_directories(${testName} PRIVATE ../src/backends)
+        target_include_directories(${testName} PRIVATE ../src)
 
         target_link_libraries(${testName} inferenceTest)
         target_link_libraries(${testName} armnnTfLiteParser)
@@ -169,7 +169,7 @@ if (BUILD_ONNX_PARSER)
     macro(OnnxParserTest testName sources)
         add_executable_ex(${testName} ${sources})
         target_include_directories(${testName} PRIVATE ../src/armnnUtils)
-        target_include_directories(${testName} PRIVATE ../src/backends)
+        target_include_directories(${testName} PRIVATE ../src)
 
         target_link_libraries(${testName} inferenceTest)
         target_link_libraries(${testName} armnnOnnxParser)
@@ -205,7 +205,7 @@ if (BUILD_CAFFE_PARSER OR BUILD_TF_PARSER OR BUILD_TF_LITE_PARSER OR BUILD_ONNX_
     add_executable_ex(ExecuteNetwork ${ExecuteNetwork_sources})
     target_include_directories(ExecuteNetwork PRIVATE ../src/armnn)
     target_include_directories(ExecuteNetwork PRIVATE ../src/armnnUtils)
-    target_include_directories(ExecuteNetwork PRIVATE ../src/backends)
+    target_include_directories(ExecuteNetwork PRIVATE ../src)
 
     if (BUILD_CAFFE_PARSER)
         target_link_libraries(ExecuteNetwork armnnCaffeParser)
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 8ef17d4df5..ac895b9a0c 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -14,7 +14,7 @@
 #include
 #endif
 
-#include
+#include
 #include
 #include
-- 
cgit v1.2.1