 include/armnn/BackendId.hpp                      |  23
 include/armnn/ILayerSupport.hpp                  |  90
 include/armnn/LayerSupport.hpp                   | 152
 src/armnn/Layer.cpp                              |   2
 src/armnn/LayerSupport.cpp                       | 239
 src/armnn/Profiling.cpp                          |   8
 src/armnn/Profiling.hpp                          |  18
 src/armnn/ProfilingEvent.cpp                     |  12
 src/armnn/ProfilingEvent.hpp                     |  18
 src/armnn/test/ProfilingEventTest.cpp            |  20
 src/backends/backendsCommon/CMakeLists.txt       |   3
 src/backends/backendsCommon/ILayerSupport.cpp    | 346
 src/backends/backendsCommon/LayerSupportBase.cpp | 355
 src/backends/backendsCommon/LayerSupportBase.hpp | 223
 src/backends/backendsCommon/WorkloadFactory.cpp  |  24
 src/backends/backendsCommon/common.mk            |   2
 src/backends/cl/ClLayerSupport.cpp               |  11
 src/backends/cl/ClLayerSupport.hpp               |   9
 src/backends/neon/NeonLayerSupport.cpp           |  19
 src/backends/neon/NeonLayerSupport.hpp           |  12
 src/backends/reference/RefLayerSupport.cpp       |  13
 src/backends/reference/RefLayerSupport.hpp       |  11
 22 files changed, 960 insertions(+), 650 deletions(-)
diff --git a/include/armnn/BackendId.hpp b/include/armnn/BackendId.hpp
index 8de985ec2f..87206073be 100644
--- a/include/armnn/BackendId.hpp
+++ b/include/armnn/BackendId.hpp
@@ -20,13 +20,13 @@ namespace armnn
//
enum class Compute
{
+ Undefined = 0,
/// CPU Execution: Reference C++ kernels
- CpuRef = 0,
+ CpuRef = 1,
/// CPU Execution: NEON: ArmCompute
- CpuAcc = 1,
+ CpuAcc = 2,
/// GPU Execution: OpenCL: ArmCompute
- GpuAcc = 2,
- Undefined = 5
+ GpuAcc = 3
};
/// Deprecated function that will be removed together with
@@ -46,7 +46,8 @@ constexpr char const* GetComputeDeviceAsCString(Compute compute)
/// the Compute enum
inline std::ostream& operator<<(std::ostream& os, const std::vector<Compute>& compute)
{
- for (const Compute& comp : compute) {
+ for (const Compute& comp : compute)
+ {
os << GetComputeDeviceAsCString(comp) << " ";
}
return os;
@@ -56,7 +57,8 @@ inline std::ostream& operator<<(std::ostream& os, const std::vector<Compute>& co
/// the Compute enum
inline std::ostream& operator<<(std::ostream& os, const std::set<Compute>& compute)
{
- for (const Compute& comp : compute) {
+ for (const Compute& comp : compute)
+ {
os << GetComputeDeviceAsCString(comp) << " ";
}
return os;
@@ -70,13 +72,10 @@ inline std::ostream& operator<<(std::ostream& os, const Compute& compute)
return os;
}
-struct UninitializedBackendId {};
-
class BackendId final
{
public:
- BackendId() { GetComputeDeviceAsCString(Compute::Undefined); }
- BackendId(UninitializedBackendId) { GetComputeDeviceAsCString(Compute::Undefined); }
+ BackendId() : m_Id(GetComputeDeviceAsCString(Compute::Undefined)) {}
BackendId(const std::string& id) : m_Id{id} {}
BackendId(const char* id) : m_Id{id} {}
@@ -132,7 +131,7 @@ private:
std::string m_Id;
};
-}
+} // namespace armnn
namespace std
{
@@ -163,7 +162,7 @@ inline std::ostream& operator<<(std::ostream& os, const BackendId& id)
template <template <typename...> class TContainer, typename... TContainerTemplateArgs>
std::ostream& operator<<(std::ostream& os,
- const TContainer<BackendId, TContainerTemplateArgs...>& ids)
+ const TContainer<BackendId, TContainerTemplateArgs...>& ids)
{
os << '[';
for (const auto& id : ids) { os << id << " "; }
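
A minimal usage sketch of the reworked BackendId (illustrative, not part of the patch): default construction now maps straight to Compute::Undefined, ids can still be built from the Compute enum (via the constructor declared elsewhere in this header) or from arbitrary strings, and the stream operators above print them.

    #include <armnn/BackendId.hpp>
    #include <iostream>

    int main()
    {
        armnn::BackendId defaultId;                      // now "Undefined" by default
        armnn::BackendId cpuAcc(armnn::Compute::CpuAcc); // from the renumbered Compute enum
        armnn::BackendId custom("MyCustomBackend");      // arbitrary string ids still work

        std::cout << defaultId << " " << cpuAcc << " " << custom << std::endl;
        return 0;
    }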
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index af38b07f39..bba344975a 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -25,12 +25,12 @@ public:
virtual bool IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsAdditionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsBatchNormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
@@ -39,75 +39,76 @@ public:
const TensorInfo& beta,
const TensorInfo& gamma,
const BatchNormalizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsBatchToSpaceNdSupported(const TensorInfo& input,
const TensorInfo& output,
const BatchToSpaceNdDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsConstantSupported(const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsConvertFp32ToFp16Supported(const TensorInfo& input,
const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsConvolution2dSupported(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
const TensorInfo& weights,
const Optional<TensorInfo>& biases,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsDebugSupported(const TensorInfo& input,
const TensorInfo& output,
const DebugDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- virtual bool IsDepthwiseConvolutionSupported(const TensorInfo& input,
- const TensorInfo& output,
- const DepthwiseConvolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ virtual bool IsDepthwiseConvolutionSupported(
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthwiseConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsDivisionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsEqualSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsFakeQuantizationSupported(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsFloorSupported(const TensorInfo& input,
const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsFullyConnectedSupported(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& weights,
const TensorInfo& biases,
const FullyConnectedDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsInputSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsL2NormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
const L2NormalizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsLstmSupported(const TensorInfo& input,
const TensorInfo& outputStateIn,
@@ -134,94 +135,99 @@ public:
const TensorInfo* projectionBias,
const TensorInfo* cellToForgetWeights,
const TensorInfo* cellToOutputWeights,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsMaximumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsMeanSupported(const TensorInfo& input,
const TensorInfo& output,
const MeanDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
+ virtual bool IsMemCopySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
const OriginsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsMultiplicationSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsNormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
const NormalizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsOutputSupported(const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsPadSupported(const TensorInfo& input,
const TensorInfo& output,
const PadDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsPermuteSupported(const TensorInfo& input,
const TensorInfo& output,
const PermuteDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsPooling2dSupported(const TensorInfo& input,
const TensorInfo& output,
const Pooling2dDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsReshapeSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ const ReshapeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsSpaceToBatchNdSupported(const TensorInfo& input,
const TensorInfo& output,
const SpaceToBatchNdDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsResizeBilinearSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsRsqrtSupported(const TensorInfo& input,
const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsSoftmaxSupported(const TensorInfo& input,
const TensorInfo& output,
const SoftmaxDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsSubtractionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsGreaterSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
}; // class ILayerSupport
using ILayerSupportSharedPtr = std::shared_ptr<ILayerSupport>;
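
Since every query on ILayerSupport is now pure virtual, a concrete backend derives from the LayerSupportBase class introduced later in this patch and overrides only the queries it implements; everything else falls through to the base, which reports "not implemented". A hypothetical sketch (class name and include path are illustrative):

    #include <backendsCommon/LayerSupportBase.hpp>

    class MyLayerSupport : public armnn::LayerSupportBase
    {
    public:
        bool IsInputSupported(const armnn::TensorInfo& input,
                              armnn::Optional<std::string&> reasonIfUnsupported
                                  = armnn::EmptyOptional()) const override
        {
            return true; // this sketch accepts any input tensor
        }
    };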
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index 446dd705b0..8286ec6109 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -105,16 +105,24 @@ bool IsDivisionSupported(const BackendId& backend,
size_t reasonIfUnsupportedMaxLength = 1024);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
-bool IsSubtractionSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported = nullptr,
- size_t reasonIfUnsupportedMaxLength = 1024);
+bool IsEqualSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
-bool IsInputSupported(const BackendId& backend,
+bool IsFakeQuantizationSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const FakeQuantizationDescriptor& descriptor,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsFloorSupported(const BackendId& backend,
const TensorInfo& input,
+ const TensorInfo& output,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
@@ -129,6 +137,20 @@ bool IsFullyConnectedSupported(const BackendId& backend,
size_t reasonIfUnsupportedMaxLength = 1024);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsGreaterSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsInputSupported(const BackendId& backend,
+ const TensorInfo& input,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
bool IsL2NormalizationSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -161,6 +183,21 @@ bool IsMaximumSupported(const BackendId& backend,
size_t reasonIfUnSupportedMaxLength = 0);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsMeanSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsMemCopySupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
bool IsMergerSupported(const BackendId& backend,
const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
@@ -169,6 +206,14 @@ bool IsMergerSupported(const BackendId& backend,
size_t reasonIfUnsupportedMaxLength = 1024);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsMinimumSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
bool IsMultiplicationSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
@@ -191,6 +236,14 @@ bool IsOutputSupported(const BackendId& backend,
size_t reasonIfUnsupportedMaxLength = 1024);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsPadSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
bool IsPermuteSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -207,12 +260,26 @@ bool IsPooling2dSupported(const BackendId& backend,
size_t reasonIfUnsupportedMaxLength = 1024);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsReshapeSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const ReshapeDescriptor& descriptor,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
bool IsResizeBilinearSupported(const BackendId& backend,
const TensorInfo& input,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsRsqrtSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
bool IsSoftmaxSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -236,49 +303,6 @@ bool IsSplitterSupported(const BackendId& backend,
size_t reasonIfUnsupportedMaxLength = 1024);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
-bool IsFakeQuantizationSupported(const BackendId& backend,
- const TensorInfo& input,
- const FakeQuantizationDescriptor& descriptor,
- char* reasonIfUnsupported = nullptr,
- size_t reasonIfUnsupportedMaxLength = 1024);
-
-/// Deprecated in favor of IBackend and ILayerSupport interfaces
-bool IsReshapeSupported(const BackendId& backend,
- const TensorInfo& input,
- char* reasonIfUnsupported = nullptr,
- size_t reasonIfUnsupportedMaxLength = 1024);
-
-/// Deprecated in favor of IBackend and ILayerSupport interfaces
-bool IsRsqrtSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported = nullptr,
- size_t reasonIfUnsupportedMaxLength = 1024);
-
-/// Deprecated in favor of IBackend and ILayerSupport interfaces
-bool IsFloorSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported = nullptr,
- size_t reasonIfUnsupportedMaxLength = 1024);
-
-/// Deprecated in favor of IBackend and ILayerSupport interfaces
-bool IsMeanSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const MeanDescriptor& descriptor,
- char* reasonIfUnsupported = nullptr,
- size_t reasonIfUnsupportedMaxLength = 1024);
-
-/// Deprecated in favor of IBackend and ILayerSupport interfaces
-bool IsPadSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const PadDescriptor& descriptor,
- char* reasonIfUnsupported = nullptr,
- size_t reasonIfUnsupportedMaxLength = 1024);
-
-/// Deprecated in favor of IBackend and ILayerSupport interfaces
bool IsStridedSliceSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -287,26 +311,10 @@ bool IsStridedSliceSupported(const BackendId& backend,
size_t reasonIfUnsupportedMaxLength = 1024);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
-bool IsMinimumSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported = nullptr,
- size_t reasonIfUnsupportedMaxLength = 1024);
-
-/// Deprecated in favor of IBackend and ILayerSupport interfaces
-bool IsGreaterSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported = nullptr,
- size_t reasonIfUnsupportedMaxLength = 1024);
-
-/// Deprecated in favor of IBackend and ILayerSupport interfaces
-bool IsEqualSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported = nullptr,
- size_t reasonIfUnsupportedMaxLength = 1024);
+bool IsSubtractionSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
}
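
All of these deprecated free functions share the same calling convention; a sketch of a call site (assumes a registered CpuRef backend), where the caller supplies a char buffer and its length to receive a truncated failure reason:

    #include <armnn/LayerSupport.hpp>
    #include <armnn/Tensor.hpp>
    #include <iostream>

    int main()
    {
        unsigned int shape[] = {1, 3, 224, 224};
        armnn::TensorInfo input(4, shape, armnn::DataType::Float32);

        char reason[1024] = {0};
        bool supported = armnn::IsInputSupported(armnn::Compute::CpuRef, input,
                                                 reason, sizeof(reason));
        if (!supported)
        {
            std::cout << "Unsupported: " << reason << std::endl;
        }
        return supported ? 0 : 1;
    }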
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index cf825e39fd..85e1de0b09 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -132,7 +132,7 @@ Layer::Layer(unsigned int numInputSlots,
: m_OutputHandlers(numOutputSlots)
, m_LayerName(name ? name : "")
, m_Type(type)
-, m_BackendId(UninitializedBackendId())
+, m_BackendId()
, m_Guid(GenerateLayerGuid())
{
m_InputSlots.reserve(numInputSlots);
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index cf6ce27dda..b0b3eccb02 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -16,11 +16,9 @@
#include <unordered_map>
#include <armnn/ArmNN.hpp>
-namespace armnn
-{
-
namespace
{
+
/// Helper function to copy a full string to a truncated version.
void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
{
@@ -33,10 +31,13 @@ void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxL
}
}
-}
+} // anonymous namespace
+
+namespace armnn
+{
// Helper macro to avoid code duplication.
-// Forwards function func to funcRef, funcNeon or funcCl, depending on the value of compute.
+// Forwards function func to funcRef, funcNeon or funcCl, depending on the value of backendId.
#define FORWARD_LAYER_SUPPORT_FUNC(backendId, func, ...) \
std::string reasonIfUnsupportedFull; \
bool isSupported; \
@@ -177,6 +178,18 @@ bool IsDebugSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsDebugSupported, input, output, descriptor);
}
+bool IsDepthwiseConvolutionSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthwiseConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
+}
+
bool IsDivisionSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
@@ -187,36 +200,39 @@ bool IsDivisionSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsDivisionSupported, input0, input1, output);
}
-bool IsSubtractionSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
+bool IsEqualSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsEqualSupported, input0, input1, output);
}
-bool IsDepthwiseConvolutionSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const DepthwiseConvolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
+bool IsFakeQuantizationSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const FakeQuantizationDescriptor& descriptor,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor);
}
-bool IsInputSupported(const BackendId& backend,
+bool IsFloorSupported(const BackendId& backend,
const TensorInfo& input,
+ const TensorInfo& output,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input);
-}
+ // By definition (that is, regardless of compute device), shapes and data type must match.
+ if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
+ {
+ return false;
+ }
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output);
+}
bool IsFullyConnectedSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -229,6 +245,25 @@ bool IsFullyConnectedSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
}
+bool IsGreaterSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsGreaterSupported, input0, input1, output);
+}
+
+bool IsInputSupported(const BackendId& backend,
+ const TensorInfo& input,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input);
+}
+
+
bool IsL2NormalizationSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -276,6 +311,25 @@ bool IsMaximumSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsMaximumSupported, input0, input1, output);
}
+bool IsMeanSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor);
+}
+
+bool IsMemCopySupported(const BackendId &backend,
+ const TensorInfo &input,
+ const TensorInfo &output,
+ char *reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsMemCopySupported, input, output);
+}
+
bool IsMergerSupported(const BackendId& backend,
std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
@@ -287,6 +341,16 @@ bool IsMergerSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor);
}
+bool IsMinimumSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsMinimumSupported, input0, input1, output);
+}
+
bool IsMultiplicationSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
@@ -315,6 +379,17 @@ bool IsOutputSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsOutputSupported, output);
}
+bool IsPadSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
+}
+
bool IsPermuteSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -335,6 +410,15 @@ bool IsPooling2dSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsPooling2dSupported, input, output, descriptor);
}
+bool IsReshapeSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const ReshapeDescriptor& descriptor,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input, descriptor);
+}
+
bool IsResizeBilinearSupported(const BackendId& backend,
const TensorInfo& input,
char* reasonIfUnsupported,
@@ -343,6 +427,15 @@ bool IsResizeBilinearSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeBilinearSupported, input);
}
+bool IsRsqrtSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsRsqrtSupported, input, output);
+}
+
bool IsSoftmaxSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -372,68 +465,6 @@ bool IsSplitterSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
}
-bool IsFakeQuantizationSupported(const BackendId& backend,
- const TensorInfo& input,
- const FakeQuantizationDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor);
-}
-
-bool IsReshapeSupported(const BackendId& backend,
- const TensorInfo& input,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input);
-}
-
-bool IsRsqrtSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsRsqrtSupported, input, output);
-}
-
-bool IsFloorSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- // By definition (that is, regardless of compute device), shapes and data type must match.
- if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
- {
- return false;
- }
-
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output);
-}
-
-bool IsMeanSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const MeanDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor);
-}
-
-bool IsPadSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const PadDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
-
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
-}
-
bool IsStridedSliceSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -444,34 +475,14 @@ bool IsStridedSliceSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsStridedSliceSupported, input, output, descriptor);
}
-bool IsMinimumSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsMinimumSupported, input0, input1, output);
-}
-
-bool IsGreaterSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsGreaterSupported, input0, input1, output);
-}
-
-bool IsEqualSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
+bool IsSubtractionSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsEqualSupported, input0, input1, output);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output);
}
-}
+} // namespace armnn
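
Note that the relocated IsFloorSupported wrapper keeps its backend-independent precondition: mismatched shapes or data types fail before any backend is consulted. A sketch:

    #include <armnn/LayerSupport.hpp>
    #include <armnn/Tensor.hpp>
    #include <cassert>

    int main()
    {
        unsigned int inShape[]  = {2, 2};
        unsigned int outShape[] = {4};
        armnn::TensorInfo in(2, inShape, armnn::DataType::Float32);
        armnn::TensorInfo out(1, outShape, armnn::DataType::Float32);

        // Rejected up front: same element count, but the shapes differ.
        assert(!armnn::IsFloorSupported(armnn::Compute::CpuRef, in, out));
        return 0;
    }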
diff --git a/src/armnn/Profiling.cpp b/src/armnn/Profiling.cpp
index 4caa31905c..c153eb67fa 100644
--- a/src/armnn/Profiling.cpp
+++ b/src/armnn/Profiling.cpp
@@ -132,7 +132,7 @@ void Profiler::AnalyzeEventSequenceAndWriteResults(ItertType first, ItertType la
<< std::setw(20) << durationMs
<< std::setw(20) << startTimeMs
<< std::setw(20) << stopTimeMs
- << std::setw(20) << GetComputeDeviceAsCString(eventPtr->GetComputeDevice())
+ << std::setw(20) << eventPtr->GetBackendId().Get()
<< std::endl;
}
outStream << std::endl;
@@ -194,10 +194,12 @@ void Profiler::EnableProfiling(bool enableProfiling)
m_ProfilingEnabled = enableProfiling;
}
-Event* Profiler::BeginEvent(Compute compute, const std::string& label, std::vector<InstrumentPtr>&& instruments)
+Event* Profiler::BeginEvent(const BackendId& backendId,
+ const std::string& label,
+ std::vector<InstrumentPtr>&& instruments)
{
Event* parent = m_Parents.empty() ? nullptr : m_Parents.top();
- m_EventSequence.push_back(std::make_unique<Event>(label, this, parent, compute, std::move(instruments)));
+ m_EventSequence.push_back(std::make_unique<Event>(label, this, parent, backendId, std::move(instruments)));
Event* event = m_EventSequence.back().get();
event->Start();
diff --git a/src/armnn/Profiling.hpp b/src/armnn/Profiling.hpp
index ef6bfd5ffb..0fb60d346a 100644
--- a/src/armnn/Profiling.hpp
+++ b/src/armnn/Profiling.hpp
@@ -35,7 +35,7 @@ public:
// Marks the beginning of a user-defined event.
// No attempt will be made to copy the name string: it must be known at compile time.
- Event* BeginEvent(Compute compute, const std::string& name, std::vector<InstrumentPtr>&& instruments);
+ Event* BeginEvent(const BackendId& backendId, const std::string& name, std::vector<InstrumentPtr>&& instruments);
// Marks the end of a user-defined event.
void EndEvent(Event* event);
@@ -117,7 +117,7 @@ public:
using InstrumentPtr = std::unique_ptr<Instrument>;
template<typename... Args>
- ScopedProfilingEvent(Compute compute, const std::string& name, Args... args)
+ ScopedProfilingEvent(const BackendId& backendId, const std::string& name, Args... args)
: m_Event(nullptr)
, m_Profiler(ProfilerManager::GetInstance().GetProfiler())
{
@@ -126,7 +126,7 @@ public:
std::vector<InstrumentPtr> instruments(0);
instruments.reserve(sizeof...(args)); //One allocation
ConstructNextInVector(instruments, args...);
- m_Event = m_Profiler->BeginEvent(compute, name, std::move(instruments));
+ m_Event = m_Profiler->BeginEvent(backendId, name, std::move(instruments));
}
}
@@ -152,15 +152,15 @@ private:
ConstructNextInVector(instruments, args...);
}
- Event* m_Event; ///< Event to track
- Profiler* m_Profiler; ///< Profiler used
+ Event* m_Event; ///< Event to track
+ Profiler* m_Profiler; ///< Profiler used
};
} // namespace armnn
// The event name must be known at compile time
-#define ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(compute, /*name,*/ ...) \
- armnn::ScopedProfilingEvent e_##__FILE__##__LINE__(compute, /*name,*/ __VA_ARGS__);
+#define ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(backendId, /*name,*/ ...) \
+ armnn::ScopedProfilingEvent e_##__FILE__##__LINE__(backendId, /*name,*/ __VA_ARGS__);
-#define ARMNN_SCOPED_PROFILING_EVENT(compute, name) \
- ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(compute, name, armnn::WallClockTimer())
+#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name) \
+ ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(backendId, name, armnn::WallClockTimer())
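
With the parameter renamed, call sites can pass any BackendId rather than only a Compute value. An illustrative call site (assumes access to the internal src/armnn/Profiling.hpp header; function and event names are hypothetical):

    #include "Profiling.hpp"

    void ExecuteMyKernel()
    {
        // The macro attaches a WallClockTimer instrument automatically.
        ARMNN_SCOPED_PROFILING_EVENT(armnn::Compute::GpuAcc, "MyKernel_Execute");
        // ... work measured until this scope ends ...
    }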
diff --git a/src/armnn/ProfilingEvent.cpp b/src/armnn/ProfilingEvent.cpp
index 55e931a163..60fb2f79c6 100644
--- a/src/armnn/ProfilingEvent.cpp
+++ b/src/armnn/ProfilingEvent.cpp
@@ -11,12 +11,12 @@ namespace armnn
Event::Event(const std::string& eventName,
Profiler* profiler,
Event* parent,
- const Compute computeDevice,
+ const BackendId backendId,
std::vector<InstrumentPtr>&& instruments)
: m_EventName(eventName)
, m_Profiler(profiler)
, m_Parent(parent)
- , m_ComputeDevice(computeDevice)
+ , m_BackendId(backendId)
, m_Instruments(std::move(instruments))
{
}
@@ -25,7 +25,7 @@ Event::Event(Event&& other) noexcept
: m_EventName(std::move(other.m_EventName))
, m_Profiler(other.m_Profiler)
, m_Parent(other.m_Parent)
- , m_ComputeDevice(other.m_ComputeDevice)
+ , m_BackendId(other.m_BackendId)
, m_Instruments(std::move(other.m_Instruments))
{
@@ -79,9 +79,9 @@ const Event* Event::GetParentEvent() const
return m_Parent;
}
-Compute Event::GetComputeDevice() const
+BackendId Event::GetBackendId() const
{
- return m_ComputeDevice;
+ return m_BackendId;
}
Event& Event::operator=(Event&& other) noexcept
@@ -94,7 +94,7 @@ Event& Event::operator=(Event&& other) noexcept
m_EventName = other.m_EventName;
m_Profiler = other.m_Profiler;
m_Parent = other.m_Parent;
- m_ComputeDevice = other.m_ComputeDevice;
+ m_BackendId = other.m_BackendId;
other.m_Profiler = nullptr;
other.m_Parent = nullptr;
return *this;
diff --git a/src/armnn/ProfilingEvent.hpp b/src/armnn/ProfilingEvent.hpp
index 134735530f..9f57753585 100644
--- a/src/armnn/ProfilingEvent.hpp
+++ b/src/armnn/ProfilingEvent.hpp
@@ -27,10 +27,10 @@ public:
using Instruments = std::vector<InstrumentPtr>;
Event(const std::string& eventName,
- Profiler* profiler,
- Event* parent,
- const Compute computeDevice,
- std::vector<InstrumentPtr>&& instrument);
+ Profiler* profiler,
+ Event* parent,
+ const BackendId backendId,
+ std::vector<InstrumentPtr>&& instrument);
Event(const Event& other) = delete;
@@ -62,9 +62,9 @@ public:
/// \return Pointer of the parent event
const Event* GetParentEvent() const;
- /// Get the compute device of the event
- /// \return Compute device of the event
- Compute GetComputeDevice() const;
+ /// Get the backend id of the event
+ /// \return Backend id of the event
+ BackendId GetBackendId() const;
/// Assignment operator
Event& operator=(const Event& other) = delete;
@@ -82,8 +82,8 @@ private:
/// Stores optional parent event
Event* m_Parent;
- /// Compute device
- Compute m_ComputeDevice;
+ /// Backend id
+ BackendId m_BackendId;
/// Instruments to use
Instruments m_Instruments;
diff --git a/src/armnn/test/ProfilingEventTest.cpp b/src/armnn/test/ProfilingEventTest.cpp
index 9e31ccb323..0add8365e9 100644
--- a/src/armnn/test/ProfilingEventTest.cpp
+++ b/src/armnn/test/ProfilingEventTest.cpp
@@ -2,10 +2,12 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include <boost/test/unit_test.hpp>
#include "ProfilingEvent.hpp"
#include "Profiling.hpp"
+
#include <thread>
using namespace armnn;
@@ -24,7 +26,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEventTest)
Event testEvent(eventName,
nullptr,
nullptr,
- armnn::Compute::Undefined,
+ BackendId(),
std::move(insts1));
BOOST_CHECK_EQUAL(testEvent.GetName(), "EventName");
@@ -41,17 +43,18 @@ BOOST_AUTO_TEST_CASE(ProfilingEventTest)
BOOST_CHECK_GE(testEvent.GetMeasurements().front().m_Value, 10.0);
// create a sub event with CpuAcc
+ BackendId cpuAccBackendId(Compute::CpuAcc);
Event::Instruments insts2;
insts2.emplace_back(std::make_unique<WallClockTimer>());
Event testEvent2(eventName,
profileManager.GetProfiler(),
&testEvent,
- Compute::CpuAcc,
+ cpuAccBackendId,
std::move(insts2));
BOOST_CHECK_EQUAL(&testEvent, testEvent2.GetParentEvent());
BOOST_CHECK_EQUAL(profileManager.GetProfiler(), testEvent2.GetProfiler());
- BOOST_CHECK_EQUAL(Compute::CpuAcc, testEvent2.GetComputeDevice());
+ BOOST_CHECK(cpuAccBackendId == testEvent2.GetBackendId());
}
BOOST_AUTO_TEST_CASE(ProfilingEventTestOnGpuAcc)
@@ -66,7 +69,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEventTestOnGpuAcc)
Event testEvent(eventName,
nullptr,
nullptr,
- armnn::Compute::Undefined,
+ BackendId(),
std::move(insts1));
BOOST_CHECK_EQUAL(testEvent.GetName(), "GPUEvent");
@@ -83,13 +86,18 @@ BOOST_AUTO_TEST_CASE(ProfilingEventTestOnGpuAcc)
BOOST_CHECK_GE(testEvent.GetMeasurements().front().m_Value, 10.0);
// create a sub event
+ BackendId gpuAccBackendId(Compute::GpuAcc);
Event::Instruments insts2;
insts2.emplace_back(std::make_unique<WallClockTimer>());
- Event testEvent2(eventName, profileManager.GetProfiler(), &testEvent, Compute::GpuAcc, std::move(insts2));
+ Event testEvent2(eventName,
+ profileManager.GetProfiler(),
+ &testEvent,
+ gpuAccBackendId,
+ std::move(insts2));
BOOST_CHECK_EQUAL(&testEvent, testEvent2.GetParentEvent());
BOOST_CHECK_EQUAL(profileManager.GetProfiler(), testEvent2.GetProfiler());
- BOOST_CHECK_EQUAL(Compute::GpuAcc, testEvent2.GetComputeDevice());
+ BOOST_CHECK(gpuAccBackendId == testEvent2.GetBackendId());
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/backendsCommon/CMakeLists.txt b/src/backends/backendsCommon/CMakeLists.txt
index b120f51184..1aa4d99dc3 100644
--- a/src/backends/backendsCommon/CMakeLists.txt
+++ b/src/backends/backendsCommon/CMakeLists.txt
@@ -11,7 +11,8 @@ list(APPEND armnnBackendsCommon_sources
CpuTensorHandle.hpp
IBackendInternal.hpp
IBackendContext.hpp
- ILayerSupport.cpp
+ LayerSupportBase.cpp
+ LayerSupportBase.hpp
IMemoryManager.hpp
ITensorHandle.hpp
MakeWorkloadHelper.hpp
diff --git a/src/backends/backendsCommon/ILayerSupport.cpp b/src/backends/backendsCommon/ILayerSupport.cpp
deleted file mode 100644
index aa1bb5042d..0000000000
--- a/src/backends/backendsCommon/ILayerSupport.cpp
+++ /dev/null
@@ -1,346 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <armnn/ILayerSupport.hpp>
-#include <armnn/Exceptions.hpp>
-
-namespace armnn
-{
-
-namespace
-{
-
-bool DefaultLayerSupport(const char* func,
- const char* file,
- unsigned int line,
- Optional<std::string&> reasonIfUnsupported)
-{
- // NOTE: We only need to return the reason if the optional parameter is not empty
- if (reasonIfUnsupported)
- {
- std::stringstream message;
- message << func << " is not implemented [" << file << ":" << line << "]";
-
- reasonIfUnsupported.value() = message.str();
- }
-
- return false;
-}
-
-} // anonymous namespace
-
-bool ILayerSupport::IsActivationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const ActivationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsAdditionSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& mean,
- const TensorInfo& var,
- const TensorInfo& beta,
- const TensorInfo& gamma,
- const BatchNormalizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
- const TensorInfo& output,
- const BatchToSpaceNdDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsConstantSupported(const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsConvolution2dSupported(const TensorInfo& input,
- const TensorInfo& output,
- const Convolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsDebugSupported(const TensorInfo& input,
- const TensorInfo& output,
- const DebugDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
- const TensorInfo& output,
- const DepthwiseConvolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsDivisionSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsEqualSupported(const armnn::TensorInfo& input0,
- const armnn::TensorInfo& input1,
- const armnn::TensorInfo& output,
- armnn::Optional<std::string &> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
- const FakeQuantizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsFloorSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& weights,
- const TensorInfo& biases,
- const FullyConnectedDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsInputSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const L2NormalizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsLstmSupported(const TensorInfo& input,
- const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn,
- const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut,
- const TensorInfo& cellStateOut,
- const TensorInfo& output,
- const LstmDescriptor& descriptor,
- const TensorInfo& inputToForgetWeights,
- const TensorInfo& inputToCellWeights,
- const TensorInfo& inputToOutputWeights,
- const TensorInfo& recurrentToForgetWeights,
- const TensorInfo& recurrentToCellWeights,
- const TensorInfo& recurrentToOutputWeights,
- const TensorInfo& forgetGateBias,
- const TensorInfo& cellBias,
- const TensorInfo& outputGateBias,
- const TensorInfo* inputToInputWeights,
- const TensorInfo* recurrentToInputWeights,
- const TensorInfo* cellToInputWeights,
- const TensorInfo* inputGateBias,
- const TensorInfo* projectionWeights,
- const TensorInfo* projectionBias,
- const TensorInfo* cellToForgetWeights,
- const TensorInfo* cellToOutputWeights,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsMaximumSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsMeanSupported(const TensorInfo& input,
- const TensorInfo& output,
- const MeanDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const OriginsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsMinimumSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsNormalizationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const NormalizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsOutputSupported(const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsPadSupported(const TensorInfo& input,
- const TensorInfo& output,
- const PadDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsPermuteSupported(const TensorInfo& input,
- const TensorInfo& output,
- const PermuteDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsPooling2dSupported(const TensorInfo& input,
- const TensorInfo& output,
- const Pooling2dDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsReshapeSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsSoftmaxSupported(const TensorInfo& input,
- const TensorInfo& output,
- const SoftmaxDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
- const TensorInfo& output,
- const SpaceToBatchNdDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsSplitterSupported(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsStridedSliceSupported(const TensorInfo& input,
- const TensorInfo& output,
- const StridedSliceDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsSubtractionSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsGreaterSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-bool ILayerSupport::IsRsqrtSupported(const TensorInfo &input,
- const TensorInfo &output,
- Optional<std::string &> reasonIfUnsupported) const
-{
- return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-} // namespace armnn
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
new file mode 100644
index 0000000000..2987e5dd2a
--- /dev/null
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -0,0 +1,355 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "LayerSupportBase.hpp"
+
+#include <armnn/Exceptions.hpp>
+
+namespace
+{
+
+bool DefaultLayerSupport(const char* func,
+ const char* file,
+ unsigned int line,
+ armnn::Optional<std::string&> reasonIfUnsupported)
+{
+ // NOTE: We only need to return the reason if the optional parameter is not empty
+ if (reasonIfUnsupported)
+ {
+ std::stringstream message;
+ message << func << " is not implemented [" << file << ":" << line << "]";
+
+ reasonIfUnsupported.value() = message.str();
+ }
+
+ return false;
+}
+
+} // anonymous namespace
+
+namespace armnn
+{
+
+bool LayerSupportBase::IsActivationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ActivationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsAdditionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsBatchNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TensorInfo& mean,
+ const TensorInfo& var,
+ const TensorInfo& beta,
+ const TensorInfo& gamma,
+ const BatchNormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsBatchToSpaceNdSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const BatchToSpaceNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsConstantSupported(const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsConvertFp32ToFp16Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsConvolution2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Convolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsDebugSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const DebugDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsDepthwiseConvolutionSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthwiseConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsDivisionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsEqualSupported(const TensorInfo& input0,
+                                        const TensorInfo& input1,
+                                        const TensorInfo& output,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsFakeQuantizationSupported(const TensorInfo& input,
+ const FakeQuantizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsFloorSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsFullyConnectedSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TensorInfo& weights,
+ const TensorInfo& biases,
+ const FullyConnectedDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsInputSupported(const TensorInfo& input,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsL2NormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const L2NormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsLstmSupported(const TensorInfo& input,
+ const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn,
+ const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const LstmDescriptor& descriptor,
+ const TensorInfo& inputToForgetWeights,
+ const TensorInfo& inputToCellWeights,
+ const TensorInfo& inputToOutputWeights,
+ const TensorInfo& recurrentToForgetWeights,
+ const TensorInfo& recurrentToCellWeights,
+ const TensorInfo& recurrentToOutputWeights,
+ const TensorInfo& forgetGateBias,
+ const TensorInfo& cellBias,
+ const TensorInfo& outputGateBias,
+ const TensorInfo* inputToInputWeights,
+ const TensorInfo* recurrentToInputWeights,
+ const TensorInfo* cellToInputWeights,
+ const TensorInfo* inputGateBias,
+ const TensorInfo* projectionWeights,
+ const TensorInfo* projectionBias,
+ const TensorInfo* cellToForgetWeights,
+ const TensorInfo* cellToOutputWeights,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsMaximumSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsMeanSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsMemCopySupported(const TensorInfo& input,
+                                          const TensorInfo& output,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const OriginsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsMinimumSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsMultiplicationSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const NormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsOutputSupported(const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsPadSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsPermuteSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PermuteDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsPooling2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Pooling2dDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsReshapeSupported(const TensorInfo& input,
+ const ReshapeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsResizeBilinearSupported(const TensorInfo& input,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsSoftmaxSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SoftmaxDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsSpaceToBatchNdSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SpaceToBatchNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const StridedSliceDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsSubtractionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsGreaterSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsRsqrtSupported(const TensorInfo& input,
+                                        const TensorInfo& output,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+} // namespace armnn
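The pattern this new base class establishes: a backend derives from LayerSupportBase, overrides only the queries it actually implements, and inherits the "not implemented" default for the rest. A minimal sketch of a hypothetical backend (the class name and the Float32-only rule are illustrative assumptions, not part of this patch):

#include <backendsCommon/LayerSupportBase.hpp>

namespace armnn
{

// Hypothetical backend: overrides one query, inherits defaults for the rest.
class MyBackendLayerSupport : public LayerSupportBase
{
public:
    bool IsAdditionSupported(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override
    {
        (void)output; // the output shape is not validated in this sketch
        // Illustrative constraint: claim support for Float32 inputs only.
        if (input0.GetDataType() != DataType::Float32 ||
            input1.GetDataType() != DataType::Float32)
        {
            if (reasonIfUnsupported)
            {
                reasonIfUnsupported.value() = "MyBackend: addition requires Float32 inputs";
            }
            return false;
        }
        return true;
    }

    // Queries not overridden here (IsSubtractionSupported, etc.) fall through
    // to LayerSupportBase and report "<func> is not implemented [file:line]".
};

} // namespace armnn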
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
new file mode 100644
index 0000000000..8c7aa98043
--- /dev/null
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -0,0 +1,223 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/ILayerSupport.hpp>
+
+namespace armnn
+{
+
+class LayerSupportBase : public ILayerSupport
+{
+public:
+ bool IsActivationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ActivationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsAdditionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsBatchNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TensorInfo& mean,
+ const TensorInfo& var,
+ const TensorInfo& beta,
+ const TensorInfo& gamma,
+ const BatchNormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsBatchToSpaceNdSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const BatchToSpaceNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsConstantSupported(const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsConvertFp32ToFp16Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsConvolution2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Convolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsDebugSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const DebugDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsDepthwiseConvolutionSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthwiseConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsDivisionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsEqualSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsFakeQuantizationSupported(const TensorInfo& input,
+ const FakeQuantizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsFloorSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsFullyConnectedSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TensorInfo& weights,
+ const TensorInfo& biases,
+ const FullyConnectedDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsGreaterSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsInputSupported(const TensorInfo& input,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsL2NormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const L2NormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsLstmSupported(const TensorInfo& input,
+ const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn,
+ const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const LstmDescriptor& descriptor,
+ const TensorInfo& inputToForgetWeights,
+ const TensorInfo& inputToCellWeights,
+ const TensorInfo& inputToOutputWeights,
+ const TensorInfo& recurrentToForgetWeights,
+ const TensorInfo& recurrentToCellWeights,
+ const TensorInfo& recurrentToOutputWeights,
+ const TensorInfo& forgetGateBias,
+ const TensorInfo& cellBias,
+ const TensorInfo& outputGateBias,
+ const TensorInfo* inputToInputWeights,
+ const TensorInfo* recurrentToInputWeights,
+ const TensorInfo* cellToInputWeights,
+ const TensorInfo* inputGateBias,
+ const TensorInfo* projectionWeights,
+ const TensorInfo* projectionBias,
+ const TensorInfo* cellToForgetWeights,
+ const TensorInfo* cellToOutputWeights,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsMaximumSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsMeanSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsMemCopySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const OriginsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsMinimumSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsMultiplicationSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const NormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsOutputSupported(const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsPadSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsPermuteSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PermuteDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsPooling2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Pooling2dDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsReshapeSupported(const TensorInfo& input,
+ const ReshapeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsResizeBilinearSupported(const TensorInfo& input,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsRsqrtSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsSoftmaxSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SoftmaxDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsSpaceToBatchNdSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SpaceToBatchNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsSplitterSupported(const TensorInfo& input,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsStridedSliceSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const StridedSliceDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsSubtractionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+};
+
+} // namespace armnn
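On the caller's side the contract is the Optional<std::string&> out-parameter. A minimal sketch of querying any ILayerSupport object and recovering the reason on failure (the free function and tensor shape are assumptions for illustration):

#include <armnn/ILayerSupport.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

#include <iostream>
#include <string>

// Prints the backend-provided reason when the input tensor is rejected.
void CheckInputSupport(const armnn::ILayerSupport& support)
{
    armnn::TensorInfo info(armnn::TensorShape({1, 16}), armnn::DataType::Float32);

    std::string reason;
    if (!support.IsInputSupported(info, armnn::Optional<std::string&>(reason)))
    {
        // For a LayerSupportBase default this reads:
        // "IsInputSupported is not implemented [<file>:<line>]"
        std::cerr << reason << std::endl;
    }
}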
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 1dc96a5ec3..209ba6a4ed 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -190,15 +190,6 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
reason);
break;
}
- case LayerType::MemCopy:
- {
- // MemCopy supported for CpuRef, CpuAcc and GpuAcc backends,
- // (also treat Undefined as CpuRef to avoid breaking lots of Unit tests).
- result = backendId == Compute::CpuRef || backendId == Compute::Undefined
- || backendId == Compute::CpuAcc || backendId == Compute::GpuAcc;
- reason.value() = "Unsupported backend type";
- break;
- }
case LayerType::Debug:
{
auto cLayer = boost::polymorphic_downcast<const DebugLayer*>(&layer);
@@ -487,6 +478,16 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
reason);
break;
}
+ case LayerType::MemCopy:
+ {
+ const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+ result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
+ OverrideDataType(output, dataType),
+ reason);
+ break;
+ }
case LayerType::Merger:
{
auto cLayer = boost::polymorphic_downcast<const MergerLayer*>(&layer);
@@ -590,8 +591,11 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
}
case LayerType::Reshape:
{
+ auto cLayer = boost::polymorphic_downcast<const ReshapeLayer*>(&layer);
const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
- result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType), reason);
+ result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType),
+ cLayer->GetParameters(),
+ reason);
break;
}
case LayerType::ResizeBilinear:
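Both updated cases funnel the slot TensorInfos through OverrideDataType before consulting the layer-support object. Conceptually it clones the TensorInfo with an optional data-type substitution; a simplified sketch (the real helper in WorkloadFactory.cpp may also adjust quantization parameters):

// Simplified stand-in for the OverrideDataType helper used in the cases above.
armnn::TensorInfo OverrideDataTypeSketch(const armnn::TensorInfo& info,
                                         armnn::Optional<armnn::DataType> type)
{
    if (!type.has_value())
    {
        return info; // no override requested: keep the layer's own data type
    }

    armnn::TensorInfo result(info);
    result.SetDataType(type.value());
    return result;
}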
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 4e79bfcd7e..a1cc0c1b3a 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -10,7 +10,7 @@
COMMON_SOURCES := \
BackendRegistry.cpp \
CpuTensorHandle.cpp \
- ILayerSupport.cpp \
+ LayerSupportBase.cpp \
MemCopyWorkload.cpp \
OutputHandler.cpp \
WorkloadData.cpp \
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index c4d45fe8ef..c1139e2e1e 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -385,6 +385,16 @@ bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
descriptor);
}
+bool ClLayerSupport::IsMemCopySupported(const TensorInfo& input,
+                                        const TensorInfo& output,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(input);
+    ignore_unused(output);
+    ignore_unused(reasonIfUnsupported);
+    return true;
+}
+
bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
const OriginsDescriptor& descriptor,
@@ -479,9 +488,11 @@ bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
}
bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
+ const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
ignore_unused(input);
+ ignore_unused(descriptor);
ignore_unused(reasonIfUnsupported);
return true;
}
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 80dd1c85d7..b06e6a9404 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -4,12 +4,12 @@
//
#pragma once
-#include <armnn/ILayerSupport.hpp>
+#include <backendsCommon/LayerSupportBase.hpp>
namespace armnn
{
-class ClLayerSupport : public ILayerSupport
+class ClLayerSupport : public LayerSupportBase
{
public:
bool IsActivationSupported(const TensorInfo& input,
@@ -122,6 +122,10 @@ public:
const MeanDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsMemCopySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
const OriginsDescriptor& descriptor,
@@ -161,6 +165,7 @@ public:
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsReshapeSupported(const TensorInfo& input,
+ const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsResizeBilinearSupported(const TensorInfo& input,
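With this signature change, callers of IsReshapeSupported must now supply the ReshapeDescriptor, even though the CL implementation currently ignores it. A minimal caller fragment (shapes and values chosen arbitrarily for illustration):

armnn::ReshapeDescriptor descriptor;
descriptor.m_TargetShape = armnn::TensorShape({1, 4}); // arbitrary example shape

armnn::TensorInfo input(armnn::TensorShape({2, 2}), armnn::DataType::Float32);

std::string reason;
armnn::ClLayerSupport support;
bool supported = support.IsReshapeSupported(input,
                                            descriptor,
                                            armnn::Optional<std::string&>(reason));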
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index cc8f973510..1f205ed6d6 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -271,6 +271,16 @@ bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
descriptor);
}
+bool NeonLayerSupport::IsMemCopySupported(const TensorInfo& input,
+                                          const TensorInfo& output,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(input);
+    ignore_unused(output);
+    ignore_unused(reasonIfUnsupported);
+    return true;
+}
+
bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
const OriginsDescriptor& descriptor,
@@ -355,22 +364,16 @@ bool NeonLayerSupport::IsPooling2dSupported(const TensorInfo& input,
}
bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
+ const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
+ ignore_unused(descriptor);
return IsSupportedForDataTypeNeon(reasonIfUnsupported,
input.GetDataType(),
&TrueFunc<>,
&TrueFunc<>);
}
-bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported) const
-{
- ignore_unused(input);
- ignore_unused(reasonIfUnsupported);
- return false;
-}
-
bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
const TensorInfo& output,
const SoftmaxDescriptor& descriptor,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 73193d34b7..c522c6ee0a 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -4,12 +4,12 @@
//
#pragma once
-#include <armnn/ILayerSupport.hpp>
+#include <backendsCommon/LayerSupportBase.hpp>
namespace armnn
{
-class NeonLayerSupport : public ILayerSupport
+class NeonLayerSupport : public LayerSupportBase
{
public:
bool IsActivationSupported(const TensorInfo& input,
@@ -85,6 +85,10 @@ public:
const MeanDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsMemCopySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
const OriginsDescriptor& descriptor,
@@ -119,11 +123,9 @@ public:
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsReshapeSupported(const TensorInfo& input,
+ const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
- bool IsResizeBilinearSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsSoftmaxSupported(const TensorInfo& input,
const TensorInfo& output,
const SoftmaxDescriptor& descriptor,
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 56d2e4c659..61a34f957e 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -390,6 +390,17 @@ bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inp
&TrueFunc<>);
}
+bool RefLayerSupport::IsMemCopySupported(const TensorInfo& input,
+                                         const TensorInfo& output,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+ ignore_unused(output);
+ return IsSupportedForDataTypeRef(reasonIfUnsupported,
+ input.GetDataType(),
+ &TrueFunc<>,
+ &TrueFunc<>);
+}
+
bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -478,8 +489,10 @@ bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input,
}
bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
+ const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
+ ignore_unused(descriptor);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
input.GetDataType(),
&TrueFunc<>,
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 188faa84b6..5778806f00 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -4,12 +4,12 @@
//
#pragma once
-#include <armnn/ILayerSupport.hpp>
+#include <backendsCommon/LayerSupportBase.hpp>
namespace armnn
{
-class RefLayerSupport : public ILayerSupport
+class RefLayerSupport : public LayerSupportBase
{
public:
bool IsActivationSupported(const TensorInfo& input,
@@ -93,7 +93,7 @@ public:
bool IsGreaterSupported(const TensorInfo& input0,
const TensorInfo& input1,
- const TensorInfo& ouput,
+ const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsInputSupported(const TensorInfo& input,
@@ -146,6 +146,10 @@ public:
const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsMemCopySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -180,6 +184,7 @@ public:
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsReshapeSupported(const TensorInfo& input,
+ const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsResizeBilinearSupported(const TensorInfo& input,