diff options
Diffstat (limited to 'src/armnn/backends')
-rw-r--r-- | src/armnn/backends/ClLayerSupport.cpp | 9 | ||||
-rw-r--r-- | src/armnn/backends/ClLayerSupport.hpp | 5 | ||||
-rw-r--r-- | src/armnn/backends/ClWorkloadFactory.cpp | 12 | ||||
-rw-r--r-- | src/armnn/backends/ClWorkloadFactory.hpp | 3 | ||||
-rw-r--r-- | src/armnn/backends/NeonLayerSupport.cpp | 9 | ||||
-rw-r--r-- | src/armnn/backends/NeonLayerSupport.hpp | 5 | ||||
-rw-r--r-- | src/armnn/backends/NeonWorkloadFactory.cpp | 12 | ||||
-rw-r--r-- | src/armnn/backends/NeonWorkloadFactory.hpp | 3 | ||||
-rw-r--r-- | src/armnn/backends/RefLayerSupport.cpp | 9 | ||||
-rw-r--r-- | src/armnn/backends/RefLayerSupport.hpp | 5 | ||||
-rw-r--r-- | src/armnn/backends/RefWorkloadFactory.cpp | 6 | ||||
-rw-r--r-- | src/armnn/backends/RefWorkloadFactory.hpp | 2 | ||||
-rw-r--r-- | src/armnn/backends/WorkloadData.cpp | 13 | ||||
-rw-r--r-- | src/armnn/backends/WorkloadData.hpp | 6 | ||||
-rw-r--r-- | src/armnn/backends/WorkloadFactory.cpp | 13 | ||||
-rw-r--r-- | src/armnn/backends/WorkloadFactory.hpp | 3 | ||||
-rw-r--r-- | src/armnn/backends/test/IsLayerSupportedTestImpl.hpp | 2 |
17 files changed, 116 insertions(+), 1 deletion(-)
diff --git a/src/armnn/backends/ClLayerSupport.cpp b/src/armnn/backends/ClLayerSupport.cpp index 3a9a22a625..7b5fee2175 100644 --- a/src/armnn/backends/ClLayerSupport.cpp +++ b/src/armnn/backends/ClLayerSupport.cpp @@ -250,6 +250,15 @@ bool IsDivisionSupportedCl(const TensorInfo& input0, output); } +bool IsSubtractionSupportedCl(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + std::string* reasonIfUnsupported) +{ + // At the moment subtraction is not supported + return false; +} + bool IsFullyConnectedSupportedCl(const TensorInfo& input, const TensorInfo& output, const TensorInfo& weights, diff --git a/src/armnn/backends/ClLayerSupport.hpp b/src/armnn/backends/ClLayerSupport.hpp index be56d5d0d6..dbe546c18d 100644 --- a/src/armnn/backends/ClLayerSupport.hpp +++ b/src/armnn/backends/ClLayerSupport.hpp @@ -59,6 +59,11 @@ bool IsDivisionSupportedCl(const TensorInfo& input0, const TensorInfo& output, std::string* reasonIfUnsupported = nullptr); +bool IsSubtractionSupportedCl(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + std::string* reasonIfUnsupported = nullptr); + bool IsFullyConnectedSupportedCl(const TensorInfo& input, const TensorInfo& output, const TensorInfo& weights, diff --git a/src/armnn/backends/ClWorkloadFactory.cpp b/src/armnn/backends/ClWorkloadFactory.cpp index d2f3b11fb2..8c9ca2081b 100644 --- a/src/armnn/backends/ClWorkloadFactory.cpp +++ b/src/armnn/backends/ClWorkloadFactory.cpp @@ -169,6 +169,12 @@ std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateDivision( return MakeWorkload<ClDivisionFloatWorkload, NullWorkload>(descriptor, info); } +std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info); +} + std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateBatchNormalization( const 
BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -435,6 +441,12 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDivision(const DivisionQueue return nullptr; } +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + void ClWorkloadFactory::Finalize() { } diff --git a/src/armnn/backends/ClWorkloadFactory.hpp b/src/armnn/backends/ClWorkloadFactory.hpp index 901bf406f8..dedbb50a6d 100644 --- a/src/armnn/backends/ClWorkloadFactory.hpp +++ b/src/armnn/backends/ClWorkloadFactory.hpp @@ -111,6 +111,9 @@ public: virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + virtual void Finalize() override; virtual void Release() override; diff --git a/src/armnn/backends/NeonLayerSupport.cpp b/src/armnn/backends/NeonLayerSupport.cpp index f39871b1ad..73d251893f 100644 --- a/src/armnn/backends/NeonLayerSupport.cpp +++ b/src/armnn/backends/NeonLayerSupport.cpp @@ -234,6 +234,15 @@ bool IsDivisionSupportedNeon(const TensorInfo& input0, return false; } +bool IsSubtractionSupportedNeon(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + std::string* reasonIfUnsupported) +{ + // At the moment subtraction is not supported + return false; +} + bool IsFullyConnectedSupportedNeon(const TensorInfo& input, const TensorInfo& output, const TensorInfo& weights, diff --git a/src/armnn/backends/NeonLayerSupport.hpp b/src/armnn/backends/NeonLayerSupport.hpp index 1715f83655..f7b62536a6 100644 --- a/src/armnn/backends/NeonLayerSupport.hpp +++ b/src/armnn/backends/NeonLayerSupport.hpp @@ -64,6 +64,11 @@ bool IsDivisionSupportedNeon(const TensorInfo& input0, const TensorInfo& output, 
std::string* reasonIfUnsupported = nullptr); +bool IsSubtractionSupportedNeon(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + std::string* reasonIfUnsupported = nullptr); + bool IsFullyConnectedSupportedNeon(const TensorInfo& input, const TensorInfo& output, const TensorInfo& weights, diff --git a/src/armnn/backends/NeonWorkloadFactory.cpp b/src/armnn/backends/NeonWorkloadFactory.cpp index c90362cce6..fe9fd55dc3 100644 --- a/src/armnn/backends/NeonWorkloadFactory.cpp +++ b/src/armnn/backends/NeonWorkloadFactory.cpp @@ -162,6 +162,12 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision( return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info); } +std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateSubtraction( + const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info); +} + std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateBatchNormalization( const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -429,6 +435,12 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDivision(const DivisionQue return nullptr; } +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& data, + const WorkloadInfo& info) const +{ + return nullptr; +} + void NeonWorkloadFactory::Finalize() {} diff --git a/src/armnn/backends/NeonWorkloadFactory.hpp b/src/armnn/backends/NeonWorkloadFactory.hpp index 32e745f57b..34d0e9529d 100644 --- a/src/armnn/backends/NeonWorkloadFactory.hpp +++ b/src/armnn/backends/NeonWorkloadFactory.hpp @@ -111,6 +111,9 @@ public: virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + virtual 
void Finalize() override; virtual void Release() override; diff --git a/src/armnn/backends/RefLayerSupport.cpp b/src/armnn/backends/RefLayerSupport.cpp index ee91e73df2..5437574789 100644 --- a/src/armnn/backends/RefLayerSupport.cpp +++ b/src/armnn/backends/RefLayerSupport.cpp @@ -130,6 +130,15 @@ bool IsDivisionSupportedRef(const TensorInfo& input0, &TrueFunc<>); } +bool IsSubtractionSupportedRef(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + std::string* reasonIfUnsupported) +{ + // At the moment subtraction is not supported + return false; +} + bool IsFullyConnectedSupportedRef(const TensorInfo& input, const TensorInfo& output, const TensorInfo& weights, diff --git a/src/armnn/backends/RefLayerSupport.hpp b/src/armnn/backends/RefLayerSupport.hpp index d396867f3d..464eb1c91c 100644 --- a/src/armnn/backends/RefLayerSupport.hpp +++ b/src/armnn/backends/RefLayerSupport.hpp @@ -56,6 +56,11 @@ bool IsDivisionSupportedRef(const TensorInfo& input0, const TensorInfo& output, std::string* reasonIfUnsupported = nullptr); +bool IsSubtractionSupportedRef(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + std::string* reasonIfUnsupported = nullptr); + bool IsFullyConnectedSupportedRef(const TensorInfo& input, const TensorInfo& output, const TensorInfo& weights, diff --git a/src/armnn/backends/RefWorkloadFactory.cpp b/src/armnn/backends/RefWorkloadFactory.cpp index d4891b3837..4de9274eb8 100644 --- a/src/armnn/backends/RefWorkloadFactory.cpp +++ b/src/armnn/backends/RefWorkloadFactory.cpp @@ -227,4 +227,10 @@ std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateDivision( return MakeWorkload<RefDivisionFloat32Workload, RefDivisionUint8Workload>(descriptor, info); } +std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateSubtraction( + const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info); +} + } // 
namespace armnn diff --git a/src/armnn/backends/RefWorkloadFactory.hpp b/src/armnn/backends/RefWorkloadFactory.hpp index 8586ca6909..5fbc6e40bd 100644 --- a/src/armnn/backends/RefWorkloadFactory.hpp +++ b/src/armnn/backends/RefWorkloadFactory.hpp @@ -127,6 +127,8 @@ public: virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; private: template <typename F32Workload, typename U8Workload, typename QueueDescriptorType> diff --git a/src/armnn/backends/WorkloadData.cpp b/src/armnn/backends/WorkloadData.cpp index 660637e96f..e3cf83fc6c 100644 --- a/src/armnn/backends/WorkloadData.cpp +++ b/src/armnn/backends/WorkloadData.cpp @@ -811,4 +811,17 @@ void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const "second input"); } +void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateTwoInputs(workloadInfo, "SubtractionQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "SubtractionQueueDescriptor"); + + ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0], + workloadInfo.m_InputTensorInfos[1], + workloadInfo.m_OutputTensorInfos[0], + "SubtractionQueueDescriptor", + "first input", + "second input"); +} + } //namespace armnn diff --git a/src/armnn/backends/WorkloadData.hpp b/src/armnn/backends/WorkloadData.hpp index d0b81632db..d50a237273 100644 --- a/src/armnn/backends/WorkloadData.hpp +++ b/src/armnn/backends/WorkloadData.hpp @@ -190,6 +190,12 @@ struct DivisionQueueDescriptor : QueueDescriptor void Validate(const WorkloadInfo& workloadInfo) const; }; +// Subtraction layer workload data. +struct SubtractionQueueDescriptor : QueueDescriptor +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + // Batch norm layer workload data. 
struct BatchNormalizationQueueDescriptor : QueueDescriptorWithParameters<BatchNormalizationDescriptor> { diff --git a/src/armnn/backends/WorkloadFactory.cpp b/src/armnn/backends/WorkloadFactory.cpp index ba926e847c..d1887252c2 100644 --- a/src/armnn/backends/WorkloadFactory.cpp +++ b/src/armnn/backends/WorkloadFactory.cpp @@ -524,6 +524,19 @@ bool IWorkloadFactory::IsLayerSupported(Compute compute, const Layer& layer, boo reasonCapacity); break; } + case LayerType::Subtraction: + { + const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); + const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); + result = IsSubtractionSupported(compute, + OverrideDataType(input0, dataType), + OverrideDataType(input1, dataType), + OverrideDataType(output, dataType), + reason, + reasonCapacity); + break; + } default: { BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer."); diff --git a/src/armnn/backends/WorkloadFactory.hpp b/src/armnn/backends/WorkloadFactory.hpp index 771aecfec3..0ae5a3ea1d 100644 --- a/src/armnn/backends/WorkloadFactory.hpp +++ b/src/armnn/backends/WorkloadFactory.hpp @@ -123,6 +123,9 @@ public: virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; }; } //namespace armnn diff --git a/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp b/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp index a580be32e5..7745972fdd 100644 --- a/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp +++ b/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp @@ -350,7 +350,7 @@ DECLARE_LAYER_POLICY_2_PARAM(Softmax) DECLARE_LAYER_POLICY_2_PARAM(Splitter) - +DECLARE_LAYER_POLICY_1_PARAM(Subtraction) // Generic implementation 
to get the number of input slots for a given layer type; |