author     David Beck <david.beck@arm.com>            2018-10-12 10:38:31 +0100
committer  Matthew Bentham <matthew.bentham@arm.com>  2018-10-22 16:57:53 +0100
commit     3cc9a626773ae9e79d3d0bd9c120704676d44daa (patch)
tree       499a9c3545d7ef2f2b4abc30e9351734a1e4c4a0
parent     233b3d685b4e4e931e86e021b77ee81d5b818f38 (diff)
download   armnn-3cc9a626773ae9e79d3d0bd9c120704676d44daa.tar.gz
IVGCVSW-1998 : replace Compute enum in LayerSupport free functions
!android-nn-driver:153490
Change-Id: I1c2a5f942e3a1c3626e093c90545ca27c64ba5e8
-rw-r--r--  Android.mk                                       |   3
-rw-r--r--  CMakeLists.txt                                   |  20
-rw-r--r--  include/armnn/Exceptions.hpp                     |  10
-rw-r--r--  include/armnn/LayerSupport.hpp                   |  56
-rw-r--r--  src/armnn/Exceptions.cpp                         |  19
-rw-r--r--  src/armnn/LayerSupport.cpp                       | 163
-rw-r--r--  src/backends/BackendRegistry.cpp                 |   6
-rw-r--r--  src/backends/CMakeLists.txt                      |   2
-rw-r--r--  src/backends/ILayerSupport.cpp                   |   1
-rw-r--r--  src/backends/README.md                           |   4
-rw-r--r--  src/backends/aclCommon/CMakeLists.txt            |   2
-rw-r--r--  src/backends/backends.cmake                      |  10
-rw-r--r--  src/backends/cl/CMakeLists.txt                   |   2
-rw-r--r--  src/backends/cl/ClBackend.cpp                    |   6
-rw-r--r--  src/backends/cl/ClBackend.hpp                    |   3
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp               |   6
-rw-r--r--  src/backends/cl/ClLayerSupport.hpp               |   2
-rw-r--r--  src/backends/cl/test/CMakeLists.txt              |   2
-rw-r--r--  src/backends/cl/workloads/CMakeLists.txt         |   2
-rw-r--r--  src/backends/neon/CMakeLists.txt                 |   2
-rw-r--r--  src/backends/neon/NeonBackend.cpp                |   7
-rw-r--r--  src/backends/neon/NeonBackend.hpp                |   3
-rw-r--r--  src/backends/neon/NeonLayerSupport.hpp           |   5
-rw-r--r--  src/backends/neon/test/CMakeLists.txt            |   2
-rw-r--r--  src/backends/neon/workloads/CMakeLists.txt       |   2
-rw-r--r--  src/backends/reference/CMakeLists.txt            |   2
-rw-r--r--  src/backends/reference/RefBackend.cpp            |   6
-rw-r--r--  src/backends/reference/RefBackend.hpp            |   3
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp       |   4
-rw-r--r--  src/backends/reference/RefLayerSupport.hpp       |   5
-rw-r--r--  src/backends/reference/test/CMakeLists.txt       |   2
-rw-r--r--  src/backends/reference/workloads/CMakeLists.txt  |   2
32 files changed, 207 insertions, 157 deletions
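
The API change this commit makes is visible at every call site: the LayerSupport free functions now take a BackendId instead of the Compute enum. Below is a minimal illustrative sketch of the new calling convention; it is not part of the commit, and the tensor shapes, includes and the Example function are assumptions for the example only.

    #include <armnn/ArmNN.hpp>
    #include <armnn/LayerSupport.hpp>

    void Example()
    {
        using namespace armnn;

        const unsigned int dims[] = {1, 16};
        TensorInfo input(2, dims, DataType::Float32);
        TensorInfo output(2, dims, DataType::Float32);
        ActivationDescriptor descriptor; // default-constructed descriptor

        char reason[1024];

        // Previously: IsActivationSupported(Compute::CpuRef, input, output, descriptor, reason, sizeof(reason));
        // Now the first argument is a BackendId, resolved through the BackendRegistry:
        bool supported = IsActivationSupported(BackendId("CpuRef"), input, output, descriptor,
                                               reason, sizeof(reason));
        (void)supported;
    }
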
diff --git a/Android.mk b/Android.mk
index 25ed834852..7493374a46 100644
--- a/Android.mk
+++ b/Android.mk
@@ -204,13 +204,14 @@ LOCAL_SRC_FILES := \
LOCAL_STATIC_LIBRARIES := \
libneuralnetworks_common \
- libarmnn \
libboost_log \
libboost_system \
libboost_unit_test_framework \
libboost_thread \
armnn-arm_compute
+LOCAL_WHOLE_STATIC_LIBRARIES := libarmnn
+
LOCAL_SHARED_LIBRARIES := \
libbase \
libhidlbase \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 22eeedde03..8182c22dc8 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -284,20 +284,22 @@ if(PROFILING_BACKEND_STREAMLINE)
${GATOR_ROOT}/annotate/streamline_annotate.c)
endif()
-add_library_ex(armnn SHARED ${armnn_sources})
-
# the backends under src/backends extend the list of
-# static libs armnn to link against
-list(APPEND armnnLibraries armnnUtils)
+# object libs armnn to include in the build
include(src/backends/backends.cmake)
+foreach(lib ${armnnLibraries})
+ message("Adding object library dependency to armnn: ${lib}")
+ list(APPEND armnn_sources $<TARGET_OBJECTS:${lib}>)
+endforeach()
+
+add_library_ex(armnn SHARED ${armnn_sources})
target_include_directories(armnn PRIVATE src)
target_include_directories(armnn PRIVATE src/armnn)
target_include_directories(armnn PRIVATE src/armnnUtils)
-foreach(lib ${armnnLibraries})
- target_link_libraries(armnn ${lib})
- message("Adding library dependency to armnn: ${lib}")
-endforeach()
+
+target_link_libraries(armnn armnnUtils)
+
target_link_libraries(armnn ${CMAKE_DL_LIBS})
install(TARGETS armnn DESTINATION ${CMAKE_INSTALL_PREFIX}/lib)
@@ -458,7 +460,7 @@ if(BUILD_UNIT_TESTS)
endif()
foreach(lib ${armnnUnitTestLibraries})
- message("Adding library dependency to UnitTests: ${lib}")
+ message("Adding object library dependency to UnitTests: ${lib}")
list(APPEND unittest_sources $<TARGET_OBJECTS:${lib}>)
endforeach()
diff --git a/include/armnn/Exceptions.hpp b/include/armnn/Exceptions.hpp
index 89b6f2cfbb..29d874cd05 100644
--- a/include/armnn/Exceptions.hpp
+++ b/include/armnn/Exceptions.hpp
@@ -48,6 +48,16 @@ class Exception : public std::exception
public:
explicit Exception(const std::string& message);
+ // exception with context
+ explicit Exception(const std::string& message,
+ const CheckLocation& location);
+
+ // preserving previous exception context
+ // and adding local context information
+ explicit Exception(const Exception& other,
+ const std::string& message,
+ const CheckLocation& location);
+
virtual const char* what() const noexcept override;
private:
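
The two added constructors let callers attach a CheckLocation to the message, and wrap a previously caught exception with extra local context. A hedged sketch of the intended usage, mirroring the call sites changed later in this patch; the function names here are illustrative, not from the commit.

    #include <armnn/Exceptions.hpp>

    void CheckArgument(bool valid)
    {
        if (!valid)
        {
            // CHECK_LOCATION() captures file, function and line; the constructor appends it to the message
            throw armnn::InvalidArgumentException("argument check failed", CHECK_LOCATION());
        }
    }

    void CheckWithContext()
    {
        try
        {
            CheckArgument(false);
        }
        catch (const armnn::InvalidArgumentException& e)
        {
            // keeps the original message and adds local context on a new line
            throw armnn::InvalidArgumentException(e, "while validating the layer inputs", CHECK_LOCATION());
        }
    }
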
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index 31874fe944..8af8240f03 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -12,21 +12,21 @@
namespace armnn
{
-bool IsActivationSupported(Compute compute,
+bool IsActivationSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsAdditionSupported(Compute compute,
+bool IsAdditionSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsBatchNormalizationSupported(Compute compute,
+bool IsBatchNormalizationSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& mean,
@@ -37,24 +37,24 @@ bool IsBatchNormalizationSupported(Compute compute,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsConstantSupported(Compute compute,
+bool IsConstantSupported(const BackendId& backend,
const TensorInfo& output,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsConvertFp16ToFp32Supported(Compute compute,
+bool IsConvertFp16ToFp32Supported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsConvertFp32ToFp16Supported(Compute compute,
+bool IsConvertFp32ToFp16Supported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsConvolution2dSupported(Compute compute,
+bool IsConvolution2dSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
@@ -63,7 +63,7 @@ bool IsConvolution2dSupported(Compute compute,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsDepthwiseConvolutionSupported(Compute compute,
+bool IsDepthwiseConvolutionSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
@@ -72,26 +72,26 @@ bool IsDepthwiseConvolutionSupported(Compute compute,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsDivisionSupported(Compute compute,
+bool IsDivisionSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsSubtractionSupported(Compute compute,
+bool IsSubtractionSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsInputSupported(Compute compute,
+bool IsInputSupported(const BackendId& backend,
const TensorInfo& input,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsFullyConnectedSupported(Compute compute,
+bool IsFullyConnectedSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& weights,
@@ -100,14 +100,14 @@ bool IsFullyConnectedSupported(Compute compute,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsL2NormalizationSupported(Compute compute,
+bool IsL2NormalizationSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const L2NormalizationDescriptor& descriptor,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsLstmSupported(Compute compute, const TensorInfo& input, const TensorInfo& outputStateIn,
+bool IsLstmSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& outputStateIn,
const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
const TensorInfo& output, const LstmDescriptor& descriptor,
@@ -122,88 +122,88 @@ bool IsLstmSupported(Compute compute, const TensorInfo& input, const TensorInfo&
const TensorInfo* cellToOutputWeights, char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsMergerSupported(Compute compute,
+bool IsMergerSupported(const BackendId& backend,
const std::vector<const TensorInfo*> inputs,
const OriginsDescriptor& descriptor,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsMultiplicationSupported(Compute compute,
+bool IsMultiplicationSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsNormalizationSupported(Compute compute,
+bool IsNormalizationSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const NormalizationDescriptor& descriptor,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsOutputSupported(Compute compute,
+bool IsOutputSupported(const BackendId& backend,
const TensorInfo& output,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsPermuteSupported(Compute compute,
+bool IsPermuteSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const PermuteDescriptor& descriptor,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsPooling2dSupported(Compute compute,
+bool IsPooling2dSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const Pooling2dDescriptor& descriptor,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsResizeBilinearSupported(Compute compute,
+bool IsResizeBilinearSupported(const BackendId& backend,
const TensorInfo& input,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsSoftmaxSupported(Compute compute,
+bool IsSoftmaxSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const SoftmaxDescriptor& descriptor,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsSplitterSupported(Compute compute,
+bool IsSplitterSupported(const BackendId& backend,
const TensorInfo& input,
const ViewsDescriptor& descriptor,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsFakeQuantizationSupported(Compute compute,
+bool IsFakeQuantizationSupported(const BackendId& backend,
const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsReshapeSupported(Compute compute,
+bool IsReshapeSupported(const BackendId& backend,
const TensorInfo& input,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsFloorSupported(Compute compute,
+bool IsFloorSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsMeanSupported(Compute compute,
+bool IsMeanSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const MeanDescriptor& descriptor,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
-bool IsPadSupported(Compute compute,
+bool IsPadSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const PadDescriptor& descriptor,
diff --git a/src/armnn/Exceptions.cpp b/src/armnn/Exceptions.cpp
index 1c4ebb6aba..52b28e9382 100644
--- a/src/armnn/Exceptions.cpp
+++ b/src/armnn/Exceptions.cpp
@@ -2,7 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "armnn/Exceptions.hpp"
+#include <armnn/Exceptions.hpp>
#include <string>
@@ -10,10 +10,25 @@ namespace armnn
{
Exception::Exception(const std::string& message)
-: m_Message(message)
+: m_Message{message}
{
}
+Exception::Exception(const std::string& message,
+ const CheckLocation& location)
+: m_Message{message}
+{
+ m_Message += location.AsString();
+}
+
+Exception::Exception(const Exception& other,
+ const std::string& message,
+ const CheckLocation& location)
+: m_Message{other.m_Message}
+{
+ m_Message += "\n" + message + location.AsString();
+}
+
const char* Exception::what() const noexcept
{
return m_Message.c_str();
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 8bad89f070..2494c74373 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -5,18 +5,19 @@
#include <armnn/LayerSupport.hpp>
#include <armnn/Optional.hpp>
-#include <backends/reference/RefLayerSupport.hpp>
-#include <backends/neon/NeonLayerSupport.hpp>
-#include <backends/cl/ClLayerSupport.hpp>
+#include <backends/BackendRegistry.hpp>
#include <boost/assert.hpp>
#include <cstring>
#include <algorithm>
+#include <unordered_map>
namespace armnn
{
+namespace
+{
/// Helper function to copy a full string to a truncated version.
void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
{
@@ -29,27 +30,39 @@ void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxL
}
}
+IBackend& GetBackend(const BackendId& id)
+{
+ static std::unordered_map<BackendId, IBackendUniquePtr> cachedBackends;
+ auto it = cachedBackends.find(id);
+ if (it == cachedBackends.end())
+ {
+ auto factoryFunc = BackendRegistry::Instance().GetFactory(id);
+ auto emplaceResult =
+ cachedBackends.emplace(
+ std::make_pair(id, factoryFunc())
+ );
+ BOOST_ASSERT(emplaceResult.second);
+ it = emplaceResult.first;
+ }
+
+ return *(it->second.get());
+}
+
+}
+
// Helper macro to avoid code duplication.
// Forwards function func to funcRef, funcNeon or funcCl, depending on the value of compute.
-#define FORWARD_LAYER_SUPPORT_FUNC(compute, func, ...) \
+#define FORWARD_LAYER_SUPPORT_FUNC(backend, func, ...) \
std::string reasonIfUnsupportedFull; \
bool isSupported; \
- switch(compute) \
- { \
- case Compute::CpuRef: \
- isSupported = func##Ref(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
- break; \
- case Compute::CpuAcc: \
- isSupported = func##Neon(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
- break; \
- case Compute::GpuAcc: \
- isSupported = func##Cl(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
- break; \
- default: \
- isSupported = func##Ref(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
- break; \
+ try { \
+ auto const& layerSupportObject = GetBackend(backend).GetLayerSupport(); \
+ isSupported = layerSupportObject.func(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
+ CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
+ } catch (InvalidArgumentException e) { \
+ /* re-throwing with more context information */ \
+ throw InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
} \
- CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
return isSupported;
bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input1)
@@ -57,17 +70,17 @@ bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input
return input0.GetDataType() == input1.GetDataType();
}
-bool IsActivationSupported(Compute compute,
+bool IsActivationSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsActivationSupported, input, output, descriptor);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsActivationSupported, input, output, descriptor);
}
-bool IsAdditionSupported(Compute compute,
+bool IsAdditionSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -79,10 +92,10 @@ bool IsAdditionSupported(Compute compute,
return false;
}
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsAdditionSupported, input0, input1, output);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsAdditionSupported, input0, input1, output);
}
-bool IsBatchNormalizationSupported(Compute compute,
+bool IsBatchNormalizationSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& mean,
@@ -93,7 +106,7 @@ bool IsBatchNormalizationSupported(Compute compute,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute,
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
IsBatchNormalizationSupported,
input,
output,
@@ -104,33 +117,33 @@ bool IsBatchNormalizationSupported(Compute compute,
descriptor);
}
-bool IsConstantSupported(Compute compute,
+bool IsConstantSupported(const BackendId& backend,
const TensorInfo& output,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsConstantSupported, output);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsConstantSupported, output);
}
-bool IsConvertFp16ToFp32Supported(Compute compute,
+bool IsConvertFp16ToFp32Supported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsConvertFp16ToFp32Supported, input, output);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp16ToFp32Supported, input, output);
}
-bool IsConvertFp32ToFp16Supported(Compute compute,
+bool IsConvertFp32ToFp16Supported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsConvertFp32ToFp16Supported, input, output);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp32ToFp16Supported, input, output);
}
-bool IsConvolution2dSupported(Compute compute,
+bool IsConvolution2dSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
@@ -139,30 +152,30 @@ bool IsConvolution2dSupported(Compute compute,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsConvolution2dSupported, input, output, descriptor, weights, biases);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvolution2dSupported, input, output, descriptor, weights, biases);
}
-bool IsDivisionSupported(Compute compute,
+bool IsDivisionSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsDivisionSupported, input0, input1, output);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsDivisionSupported, input0, input1, output);
}
-bool IsSubtractionSupported(Compute compute,
+bool IsSubtractionSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsSubtractionSupported, input0, input1, output);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output);
}
-bool IsDepthwiseConvolutionSupported(Compute compute,
+bool IsDepthwiseConvolutionSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
@@ -171,18 +184,18 @@ bool IsDepthwiseConvolutionSupported(Compute compute,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
}
-bool IsInputSupported(Compute compute,
+bool IsInputSupported(const BackendId& backend,
const TensorInfo& input,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsInputSupported, input);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input);
}
-bool IsFullyConnectedSupported(Compute compute,
+bool IsFullyConnectedSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& weights,
@@ -191,20 +204,20 @@ bool IsFullyConnectedSupported(Compute compute,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
}
-bool IsL2NormalizationSupported(Compute compute,
+bool IsL2NormalizationSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const L2NormalizationDescriptor& descriptor,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsL2NormalizationSupported, input, output, descriptor);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsL2NormalizationSupported, input, output, descriptor);
}
-bool IsLstmSupported(Compute compute, const TensorInfo& input, const TensorInfo& outputStateIn,
+bool IsLstmSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& outputStateIn,
const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
const TensorInfo& output, const LstmDescriptor& descriptor,
@@ -220,7 +233,7 @@ bool IsLstmSupported(Compute compute, const TensorInfo& input, const TensorInfo&
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsLstmSupported, input, outputStateIn, cellStateIn,
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsLstmSupported, input, outputStateIn, cellStateIn,
scratchBuffer, outputStateOut, cellStateOut,
output, descriptor, inputToForgetWeights, inputToCellWeights,
inputToOutputWeights, recurrentToForgetWeights,
@@ -230,109 +243,109 @@ bool IsLstmSupported(Compute compute, const TensorInfo& input, const TensorInfo&
cellToInputWeights, inputGateBias, projectionWeights,
projectionBias, cellToForgetWeights, cellToOutputWeights);
}
-bool IsMergerSupported(Compute compute,
+bool IsMergerSupported(const BackendId& backend,
std::vector<const TensorInfo*> inputs,
const OriginsDescriptor& descriptor,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
BOOST_ASSERT(inputs.size() > 0);
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsMergerSupported, inputs, descriptor);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, descriptor);
}
-bool IsMultiplicationSupported(Compute compute,
+bool IsMultiplicationSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsMultiplicationSupported, input0, input1, output);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsMultiplicationSupported, input0, input1, output);
}
-bool IsNormalizationSupported(Compute compute,
+bool IsNormalizationSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const NormalizationDescriptor& descriptor,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsNormalizationSupported, input, output, descriptor);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsNormalizationSupported, input, output, descriptor);
}
-bool IsOutputSupported(Compute compute,
+bool IsOutputSupported(const BackendId& backend,
const TensorInfo& output,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsOutputSupported, output);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsOutputSupported, output);
}
-bool IsPermuteSupported(Compute compute,
+bool IsPermuteSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const PermuteDescriptor& descriptor,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsPermuteSupported, input, output, descriptor);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsPermuteSupported, input, output, descriptor);
}
-bool IsPooling2dSupported(Compute compute,
+bool IsPooling2dSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const Pooling2dDescriptor& descriptor,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsPooling2dSupported, input, output, descriptor);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsPooling2dSupported, input, output, descriptor);
}
-bool IsResizeBilinearSupported(Compute compute,
+bool IsResizeBilinearSupported(const BackendId& backend,
const TensorInfo& input,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsResizeBilinearSupported, input);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeBilinearSupported, input);
}
-bool IsSoftmaxSupported(Compute compute,
+bool IsSoftmaxSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const SoftmaxDescriptor& descriptor,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsSoftmaxSupported, input, output, descriptor);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsSoftmaxSupported, input, output, descriptor);
}
-bool IsSplitterSupported(Compute compute,
+bool IsSplitterSupported(const BackendId& backend,
const TensorInfo& input,
const ViewsDescriptor& descriptor,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsSplitterSupported, input, descriptor);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
}
-bool IsFakeQuantizationSupported(Compute compute,
+bool IsFakeQuantizationSupported(const BackendId& backend,
const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsFakeQuantizationSupported, input, descriptor);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor);
}
-bool IsReshapeSupported(Compute compute,
+bool IsReshapeSupported(const BackendId& backend,
const TensorInfo& input,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsReshapeSupported, input);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input);
}
-bool IsFloorSupported(Compute compute,
+bool IsFloorSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
char* reasonIfUnsupported,
@@ -344,20 +357,20 @@ bool IsFloorSupported(Compute compute,
return false;
}
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsFloorSupported, input, output);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output);
}
-bool IsMeanSupported(Compute compute,
+bool IsMeanSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const MeanDescriptor& descriptor,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsMeanSupported, input, output, descriptor);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor);
}
-bool IsPadSupported(Compute compute,
+bool IsPadSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
const PadDescriptor& descriptor,
@@ -365,7 +378,7 @@ bool IsPadSupported(Compute compute,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(compute, IsPadSupported, input, output, descriptor);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
}
}
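
With the compute switch removed, every free function now funnels through GetBackend(): the BackendId is looked up in the BackendRegistry, the backend is created (and cached), and the query is forwarded to its ILayerSupport object. A simplified sketch of that path without the caching or the macro; the function name and include set are assumptions for illustration.

    #include <armnn/ILayerSupport.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <backends/BackendRegistry.hpp>

    #include <string>

    bool QueryInputSupport(const armnn::BackendId& id, const armnn::TensorInfo& input, std::string& reason)
    {
        using namespace armnn;

        // throws InvalidArgumentException if no factory is registered for this id
        auto factoryFunc = BackendRegistry::Instance().GetFactory(id);
        IBackendUniquePtr backend = factoryFunc();

        // forward the query to the backend's layer-support object
        const ILayerSupport& layerSupport = backend->GetLayerSupport();
        return layerSupport.IsInputSupported(input, Optional<std::string&>(reason));
    }
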
diff --git a/src/backends/BackendRegistry.cpp b/src/backends/BackendRegistry.cpp
index a5e9f0e1d9..1360168b9f 100644
--- a/src/backends/BackendRegistry.cpp
+++ b/src/backends/BackendRegistry.cpp
@@ -19,7 +19,8 @@ void BackendRegistry::Register(const BackendId& id, FactoryFunction factory)
{
if (m_BackendFactories.count(id) > 0)
{
- throw InvalidArgumentException(std::string(id) + " already registered as backend");
+ throw InvalidArgumentException(std::string(id) + " already registered as backend",
+ CHECK_LOCATION());
}
m_BackendFactories[id] = factory;
@@ -30,7 +31,8 @@ BackendRegistry::FactoryFunction BackendRegistry::GetFactory(const BackendId& id
auto it = m_BackendFactories.find(id);
if (it == m_BackendFactories.end())
{
- throw InvalidArgumentException(std::string(id) + " has no backend factory registered");
+ throw InvalidArgumentException(std::string(id) + " has no backend factory registered",
+ CHECK_LOCATION());
}
return it->second;
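
Both Register() and GetFactory() now throw InvalidArgumentException carrying a CHECK_LOCATION() payload instead of a bare message. A small hedged sketch of probing the registry for an id and surfacing that context; the helper name is an assumption.

    #include <armnn/Exceptions.hpp>
    #include <backends/BackendRegistry.hpp>

    #include <iostream>

    bool HasBackend(const armnn::BackendId& id)
    {
        try
        {
            // throws InvalidArgumentException("<id> has no backend factory registered", CHECK_LOCATION())
            armnn::BackendRegistry::Instance().GetFactory(id);
            return true;
        }
        catch (const armnn::InvalidArgumentException& e)
        {
            // what() now ends with the file/function/line captured by CHECK_LOCATION()
            std::cerr << e.what() << std::endl;
            return false;
        }
    }
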
diff --git a/src/backends/CMakeLists.txt b/src/backends/CMakeLists.txt
index 0bc6888899..3079447bb2 100644
--- a/src/backends/CMakeLists.txt
+++ b/src/backends/CMakeLists.txt
@@ -30,7 +30,7 @@ list(APPEND armnnBackendsCommon_sources
WorkloadUtils.hpp
)
-add_library(armnnBackendsCommon STATIC ${armnnBackendsCommon_sources})
+add_library(armnnBackendsCommon OBJECT ${armnnBackendsCommon_sources})
target_include_directories(armnnBackendsCommon PRIVATE ${PROJECT_SOURCE_DIR}/src)
target_include_directories(armnnBackendsCommon PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
target_include_directories(armnnBackendsCommon PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/ILayerSupport.cpp b/src/backends/ILayerSupport.cpp
index beefa8788d..34168c55b8 100644
--- a/src/backends/ILayerSupport.cpp
+++ b/src/backends/ILayerSupport.cpp
@@ -4,6 +4,7 @@
//
#include <armnn/ILayerSupport.hpp>
+#include <armnn/Exceptions.hpp>
namespace armnn
{
diff --git a/src/backends/README.md b/src/backends/README.md
index 09b9e81539..670d6cfd98 100644
--- a/src/backends/README.md
+++ b/src/backends/README.md
@@ -13,7 +13,7 @@ ArmNN source tree.
The ```backend.cmake``` has two main purposes:
-1. It makes sure the artifact (typically a static library) is linked into the ArmNN shared library.
+1. It makes sure the artifact (a cmake OBJECT library) is linked into the ArmNN shared library.
2. It makes sure that the subdirectory where backend sources reside gets included in the build.
To achieve this there are two requirements for the ```backend.cmake``` file
@@ -28,7 +28,7 @@ To achieve this there are two requirements for the ```backend.cmake``` file
add_subdirectory(${PROJECT_SOURCE_DIR}/src/backends/reference)
#
-# Add the static libraries built by the reference backend to the
+# Add the cmake OBJECT libraries built by the reference backend to the
# list of libraries linked against the ArmNN shared library.
#
list(APPEND armnnLibraries armnnRefBackend armnnRefBackendWorkloads)
diff --git a/src/backends/aclCommon/CMakeLists.txt b/src/backends/aclCommon/CMakeLists.txt
index d99b90b897..2bfd024e10 100644
--- a/src/backends/aclCommon/CMakeLists.txt
+++ b/src/backends/aclCommon/CMakeLists.txt
@@ -25,7 +25,7 @@ list(APPEND armnnAclCommon_sources
add_subdirectory(test)
-add_library(armnnAclCommon STATIC ${armnnAclCommon_sources})
+add_library(armnnAclCommon OBJECT ${armnnAclCommon_sources})
target_include_directories(armnnAclCommon PRIVATE ${PROJECT_SOURCE_DIR}/src)
target_include_directories(armnnAclCommon PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
target_include_directories(armnnAclCommon PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/backends.cmake b/src/backends/backends.cmake
index f6f69bd11e..57f5a00015 100644
--- a/src/backends/backends.cmake
+++ b/src/backends/backends.cmake
@@ -11,8 +11,14 @@ list(APPEND armnnLibraries armnnBackendsCommon)
FILE(GLOB commonIncludes ${PROJECT_SOURCE_DIR}/src/backends/*/common.cmake)
FILE(GLOB backendIncludes ${PROJECT_SOURCE_DIR}/src/backends/*/backend.cmake)
-# prefer to include common code first so backends can depend on them
-foreach(includeFile ${commonIncludes} ${backendIncludes})
+# prefer to include common code first
+foreach(includeFile ${commonIncludes})
+ message("Including backend common library into the build: ${includeFile}")
+ include(${includeFile})
+endforeach()
+
+# now backends can depend on common code included first
+foreach(includeFile ${backendIncludes})
message("Including backend into the build: ${includeFile}")
include(${includeFile})
endforeach()
diff --git a/src/backends/cl/CMakeLists.txt b/src/backends/cl/CMakeLists.txt
index 2f32081dfe..5704e0e8ab 100644
--- a/src/backends/cl/CMakeLists.txt
+++ b/src/backends/cl/CMakeLists.txt
@@ -24,7 +24,7 @@ if(ARMCOMPUTECL)
add_subdirectory(test)
endif()
-add_library(armnnClBackend STATIC ${armnnClBackend_sources})
+add_library(armnnClBackend OBJECT ${armnnClBackend_sources})
target_include_directories(armnnClBackend PRIVATE ${PROJECT_SOURCE_DIR}/src)
target_include_directories(armnnClBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
target_include_directories(armnnClBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index 95acf009de..29d1b3a402 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -15,10 +15,9 @@ namespace armnn
namespace
{
-static const BackendId s_Id{"GpuAcc"};
static BackendRegistry::Helper g_RegisterHelper{
- s_Id,
+ ClBackend::GetIdStatic(),
[]()
{
return IBackendUniquePtr(new ClBackend, &ClBackend::Destroy);
@@ -27,8 +26,9 @@ static BackendRegistry::Helper g_RegisterHelper{
}
-const BackendId& ClBackend::GetId() const
+const BackendId& ClBackend::GetIdStatic()
{
+ static const BackendId s_Id{"GpuAcc"};
return s_Id;
}
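
Exposing the id through a static GetIdStatic() also lets in-tree code refer to a backend without hard-coding the "GpuAcc" string, and the function-local static is constructed on first use, so the registration helper above can reference it without depending on static initialisation order; presumably that is the motivation for the change. An illustrative use of the static id with the reworked free functions (the function name and include path are assumptions):

    #include <armnn/LayerSupport.hpp>
    #include <backends/cl/ClBackend.hpp>

    bool IsOutputSupportedOnGpu(const armnn::TensorInfo& output)
    {
        // ClBackend::GetIdStatic() wraps the BackendId{"GpuAcc"} defined above
        return armnn::IsOutputSupported(armnn::ClBackend::GetIdStatic(), output);
    }
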
diff --git a/src/backends/cl/ClBackend.hpp b/src/backends/cl/ClBackend.hpp
index b927db4b25..1a99b7652b 100644
--- a/src/backends/cl/ClBackend.hpp
+++ b/src/backends/cl/ClBackend.hpp
@@ -16,7 +16,8 @@ public:
ClBackend() = default;
~ClBackend() = default;
- const BackendId& GetId() const override;
+ static const BackendId& GetIdStatic();
+ const BackendId& GetId() const override { return GetIdStatic(); }
const ILayerSupport& GetLayerSupport() const override;
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 9088da8645..7c66348b98 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -5,8 +5,10 @@
#include "ClLayerSupport.hpp"
-#include "InternalTypes.hpp"
-#include "LayerSupportCommon.hpp"
+#include <InternalTypes.hpp>
+#include <LayerSupportCommon.hpp>
+
+#include <armnn/Descriptors.hpp>
#include <boost/core/ignore_unused.hpp>
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 75e90e000d..2d57d10040 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -4,7 +4,7 @@
//
#pragma once
-#include <armnn/ArmNN.hpp>
+#include <armnn/ILayerSupport.hpp>
namespace armnn
{
diff --git a/src/backends/cl/test/CMakeLists.txt b/src/backends/cl/test/CMakeLists.txt
index 4936a78645..262e23a7c1 100644
--- a/src/backends/cl/test/CMakeLists.txt
+++ b/src/backends/cl/test/CMakeLists.txt
@@ -15,4 +15,4 @@ list(APPEND armnnClBackendUnitTests_sources
add_library(armnnClBackendUnitTests OBJECT ${armnnClBackendUnitTests_sources})
target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src)
target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
-target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
\ No newline at end of file
+target_include_directories(armnnClBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index 5bd217295e..59a45facea 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -58,7 +58,7 @@ list(APPEND armnnClBackendWorkloads_sources
ClWorkloadUtils.hpp
)
-add_library(armnnClBackendWorkloads STATIC ${armnnClBackendWorkloads_sources})
+add_library(armnnClBackendWorkloads OBJECT ${armnnClBackendWorkloads_sources})
target_include_directories(armnnClBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src)
target_include_directories(armnnClBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
target_include_directories(armnnClBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/neon/CMakeLists.txt b/src/backends/neon/CMakeLists.txt
index 152955aa06..c44dcc1075 100644
--- a/src/backends/neon/CMakeLists.txt
+++ b/src/backends/neon/CMakeLists.txt
@@ -29,7 +29,7 @@ else()
)
endif()
-add_library(armnnNeonBackend STATIC ${armnnNeonBackend_sources})
+add_library(armnnNeonBackend OBJECT ${armnnNeonBackend_sources})
target_include_directories(armnnNeonBackend PRIVATE ${PROJECT_SOURCE_DIR}/src)
target_include_directories(armnnNeonBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
target_include_directories(armnnNeonBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index 2e235b6c2a..3c12f7766d 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -16,10 +16,8 @@ namespace armnn
namespace
{
-static const BackendId s_Id{"CpuAcc"};
-
static BackendRegistry::Helper g_RegisterHelper{
- s_Id,
+ NeonBackend::GetIdStatic(),
[]()
{
return IBackendUniquePtr(new NeonBackend, &NeonBackend::Destroy);
@@ -28,8 +26,9 @@ static BackendRegistry::Helper g_RegisterHelper{
}
-const BackendId& NeonBackend::GetId() const
+const BackendId& NeonBackend::GetIdStatic()
{
+ static const BackendId s_Id{"CpuAcc"};
return s_Id;
}
diff --git a/src/backends/neon/NeonBackend.hpp b/src/backends/neon/NeonBackend.hpp
index fa2cad13ee..c7f7f6e380 100644
--- a/src/backends/neon/NeonBackend.hpp
+++ b/src/backends/neon/NeonBackend.hpp
@@ -16,7 +16,8 @@ public:
NeonBackend() = default;
~NeonBackend() = default;
- const BackendId& GetId() const override;
+ static const BackendId& GetIdStatic();
+ const BackendId& GetId() const override { return GetIdStatic(); }
const ILayerSupport& GetLayerSupport() const override;
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 91be98182a..1223ba893a 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -4,10 +4,7 @@
//
#pragma once
-#include <armnn/DescriptorsFwd.hpp>
-#include <armnn/Optional.hpp>
-#include <armnn/Types.hpp>
-#include <armnn/Tensor.hpp>
+#include <armnn/ILayerSupport.hpp>
namespace armnn
{
diff --git a/src/backends/neon/test/CMakeLists.txt b/src/backends/neon/test/CMakeLists.txt
index 4a3380c3f9..384a5e1749 100644
--- a/src/backends/neon/test/CMakeLists.txt
+++ b/src/backends/neon/test/CMakeLists.txt
@@ -14,4 +14,4 @@ list(APPEND armnnNeonBackendUnitTests_sources
add_library(armnnNeonBackendUnitTests OBJECT ${armnnNeonBackendUnitTests_sources})
target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src)
target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
-target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
\ No newline at end of file
+target_include_directories(armnnNeonBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 0b0b9ed5a0..fddbcb5d97 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -61,7 +61,7 @@ list(APPEND armnnNeonBackendWorkloads_sources
NeonWorkloadUtils.hpp
)
-add_library(armnnNeonBackendWorkloads STATIC ${armnnNeonBackendWorkloads_sources})
+add_library(armnnNeonBackendWorkloads OBJECT ${armnnNeonBackendWorkloads_sources})
target_include_directories(armnnNeonBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src)
target_include_directories(armnnNeonBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
target_include_directories(armnnNeonBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/reference/CMakeLists.txt b/src/backends/reference/CMakeLists.txt
index 5aa3fc27f5..05ef7d5a8f 100644
--- a/src/backends/reference/CMakeLists.txt
+++ b/src/backends/reference/CMakeLists.txt
@@ -12,7 +12,7 @@ list(APPEND armnnRefBackend_sources
RefWorkloadFactory.hpp
)
-add_library(armnnRefBackend STATIC ${armnnRefBackend_sources})
+add_library(armnnRefBackend OBJECT ${armnnRefBackend_sources})
target_include_directories(armnnRefBackend PRIVATE ${PROJECT_SOURCE_DIR}/src)
target_include_directories(armnnRefBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
target_include_directories(armnnRefBackend PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/reference/RefBackend.cpp b/src/backends/reference/RefBackend.cpp
index ef52a5edeb..1f08d82b98 100644
--- a/src/backends/reference/RefBackend.cpp
+++ b/src/backends/reference/RefBackend.cpp
@@ -15,10 +15,9 @@ namespace armnn
namespace
{
-const BackendId s_Id{"CpuRef"};
static BackendRegistry::Helper s_RegisterHelper{
- s_Id,
+ RefBackend::GetIdStatic(),
[]()
{
return IBackendUniquePtr(new RefBackend, &RefBackend::Destroy);
@@ -27,8 +26,9 @@ static BackendRegistry::Helper s_RegisterHelper{
}
-const BackendId& RefBackend::GetId() const
+const BackendId& RefBackend::GetIdStatic()
{
+ static const BackendId s_Id{"CpuRef"};
return s_Id;
}
diff --git a/src/backends/reference/RefBackend.hpp b/src/backends/reference/RefBackend.hpp
index dcc974167d..c206dbdaf3 100644
--- a/src/backends/reference/RefBackend.hpp
+++ b/src/backends/reference/RefBackend.hpp
@@ -16,7 +16,8 @@ public:
RefBackend() = default;
~RefBackend() = default;
- const BackendId& GetId() const override;
+ static const BackendId& GetIdStatic();
+ const BackendId& GetId() const override { return GetIdStatic(); }
const ILayerSupport& GetLayerSupport() const override;
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 2ee942cc2e..3a250a6981 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -3,8 +3,10 @@
// SPDX-License-Identifier: MIT
//
-#include "LayerSupportCommon.hpp"
#include "RefLayerSupport.hpp"
+
+#include <LayerSupportCommon.hpp>
+
#include <armnn/Descriptors.hpp>
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 1d0edf6cb3..40bca7f179 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -4,10 +4,7 @@
//
#pragma once
-#include <armnn/DescriptorsFwd.hpp>
-#include <armnn/Types.hpp>
-#include <armnn/Tensor.hpp>
-#include <layers/LstmLayer.hpp>
+#include <armnn/ILayerSupport.hpp>
namespace armnn
{
diff --git a/src/backends/reference/test/CMakeLists.txt b/src/backends/reference/test/CMakeLists.txt
index 511d747202..deee364a9a 100644
--- a/src/backends/reference/test/CMakeLists.txt
+++ b/src/backends/reference/test/CMakeLists.txt
@@ -12,4 +12,4 @@ list(APPEND armnnRefBackendUnitTests_sources
add_library(armnnRefBackendUnitTests OBJECT ${armnnRefBackendUnitTests_sources})
target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src)
target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
-target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
\ No newline at end of file
+target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 5a756e4596..be71a85047 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -102,7 +102,7 @@ list(APPEND armnnRefBackendWorkloads_sources
RefMeanUint8Workload.hpp
)
-add_library(armnnRefBackendWorkloads STATIC ${armnnRefBackendWorkloads_sources})
+add_library(armnnRefBackendWorkloads OBJECT ${armnnRefBackendWorkloads_sources})
target_include_directories(armnnRefBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src)
target_include_directories(armnnRefBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
target_include_directories(armnnRefBackendWorkloads PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)