author | josh minor <josh.minor@arm.com> | 2020-01-06 16:40:46 -0600
---|---|---
committer | Derek Lamberti <derek.lamberti@arm.com> | 2020-01-23 14:29:14 +0000
commit | 4a3c61091037e7e86e8b03bb060d8c1ab82731a9 (patch) |
tree | 928644023400ad5ac0c26b33dfff2f975567d6e8 /src/backends |
parent | 190a39a4a9598e42b636ae4ab843761884148160 (diff) |
IVGCVSW-4259 Add frontend and reference workload for UnaryOperationLayer
* Added new layer named ElementwiseUnary (usage sketched after this message)
* Deprecated existing Abs/Rsqrt layer functions
* Updated existing Abs/Rsqrt test infrastructure to use new layer
* Added boilerplate for new Exp, Neg and Sqrt elementwise op layers
* AbsQuantize test removed pending future commit
* Serialization support added
!android-nn-driver:2550
Change-Id: Ic595c645925e17b45db568187fd05646daf2e87f
Signed-off-by: josh minor <josh.minor@arm.com>
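For orientation, a minimal usage sketch (not part of the patch): `ElementwiseUnaryDescriptor` and `AddElementwiseUnaryLayer` are taken from the diff below, while the surrounding network wiring follows the usual Arm NN `INetwork` pattern.

```cpp
#include <armnn/ArmNN.hpp>

// Build a network that computes |x| with the new unified layer, replacing
// the now-deprecated INetwork::AddAbsLayer(). Switching to Rsqrt (or, once
// implemented, Exp/Neg/Sqrt) only changes the descriptor's operation.
armnn::INetworkPtr CreateAbsNetwork(const armnn::TensorInfo& tensorInfo)
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    IConnectableLayer* input  = network->AddInputLayer(0, "input");

    // One descriptor field selects the unary operation.
    ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
    IConnectableLayer* unary  = network->AddElementwiseUnaryLayer(descriptor, "abs");

    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(unary->GetInputSlot(0));
    unary->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    unary->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    return network;
}
```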
Diffstat (limited to 'src/backends')
58 files changed, 979 insertions, 737 deletions
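The per-backend migration pattern is repeated across the CL, Neon and reference hunks below and is easier to read condensed. A sketch under stated assumptions: `MyWorkloadFactory`, `MyAbsWorkload` and `MyRsqrtWorkload` are placeholder names for a backend's factory and its existing workloads, not types from this patch.

```cpp
#include <backendsCommon/WorkloadData.hpp>    // ElementwiseUnaryQueueDescriptor
#include <backendsCommon/WorkloadFactory.hpp>

#include <memory>

// The unified queue descriptor is unpacked back into the legacy per-op
// descriptors, so the existing Abs/Rsqrt workloads run unchanged.
std::unique_ptr<armnn::IWorkload> MyWorkloadFactory::CreateElementwiseUnary(
    const armnn::ElementwiseUnaryQueueDescriptor& descriptor,
    const armnn::WorkloadInfo& info) const
{
    using namespace armnn;

    if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs)
    {
        AbsQueueDescriptor absDescriptor;
        absDescriptor.m_Inputs  = descriptor.m_Inputs;   // tensor handles carry over
        absDescriptor.m_Outputs = descriptor.m_Outputs;
        return std::make_unique<MyAbsWorkload>(absDescriptor, info);
    }
    if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt)
    {
        RsqrtQueueDescriptor rsqrtDescriptor;
        rsqrtDescriptor.m_Inputs  = descriptor.m_Inputs;
        rsqrtDescriptor.m_Outputs = descriptor.m_Outputs;
        return std::make_unique<MyRsqrtWorkload>(rsqrtDescriptor, info);
    }
    return nullptr; // Exp/Neg/Sqrt are front-end boilerplate only at this stage
}
```

The deprecated entry points shim in the opposite direction: each `CreateAbs`/`CreateRsqrt` override now builds an `ElementwiseUnaryQueueDescriptor` with the matching `UnaryOperation` and forwards to `CreateElementwiseUnary`, so the old and new call paths converge on one implementation.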
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp index 8332774202..b19356f955 100644 --- a/src/backends/backendsCommon/LayerSupportBase.cpp +++ b/src/backends/backendsCommon/LayerSupportBase.cpp @@ -3,7 +3,10 @@ // SPDX-License-Identifier: MIT // +#include <armnn/Deprecated.hpp> +#include <armnn/Descriptors.hpp> #include <armnn/Exceptions.hpp> +#include <armnn/Types.hpp> #include <backendsCommon/LayerSupportBase.hpp> @@ -195,6 +198,26 @@ bool LayerSupportBase::IsDivisionSupported(const TensorInfo& /*input0*/, return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); } +bool LayerSupportBase::IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported) const +{ + if (descriptor.m_Operation == UnaryOperation::Abs) + { + ARMNN_NO_DEPRECATE_WARN_BEGIN + return IsAbsSupported(input, output, reasonIfUnsupported); + ARMNN_NO_DEPRECATE_WARN_END + } + else if (descriptor.m_Operation == UnaryOperation::Rsqrt) + { + ARMNN_NO_DEPRECATE_WARN_BEGIN + return IsRsqrtSupported(input, output, reasonIfUnsupported); + ARMNN_NO_DEPRECATE_WARN_END + } + return false; +} + bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo& /*input0*/, const armnn::TensorInfo& /*input1*/, const armnn::TensorInfo& /*output*/, diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp index 60f94d0c4d..7a65eb55ed 100644 --- a/src/backends/backendsCommon/LayerSupportBase.hpp +++ b/src/backends/backendsCommon/LayerSupportBase.hpp @@ -13,6 +13,7 @@ namespace armnn class LayerSupportBase : public ILayerSupport { public: + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsAbsSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; @@ -119,6 +120,11 @@ public: const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + bool IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead") bool IsEqualSupported(const TensorInfo& input0, const TensorInfo& input1, @@ -278,6 +284,7 @@ public: const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index fa5c6fe38e..d2ab41ef40 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -2969,4 +2969,28 @@ void ComparisonQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const } } +void ElementwiseUnaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + const std::string descriptorName{"ElementwiseUnaryQueueDescriptor"}; + + ValidateNumInputs(workloadInfo, descriptorName, 1); + ValidateNumOutputs(workloadInfo, descriptorName, 1); + + const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0]; + const TensorInfo& outputTensorInfo = 
workloadInfo.m_OutputTensorInfos[0]; + + ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output"); + + std::vector<DataType> supportedTypes = + { + DataType::Float16, + DataType::Float32, + DataType::QAsymmU8, + DataType::QSymmS16 + }; + + ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName); + ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output"); +} + } // namespace armnn diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp index 43be3cd6e1..c5fcf15c3b 100644 --- a/src/backends/backendsCommon/WorkloadData.hpp +++ b/src/backends/backendsCommon/WorkloadData.hpp @@ -560,4 +560,9 @@ struct ComparisonQueueDescriptor : QueueDescriptorWithParameters<ComparisonDescr void Validate(const WorkloadInfo& workloadInfo) const; }; +struct ElementwiseUnaryQueueDescriptor : QueueDescriptorWithParameters<ElementwiseUnaryDescriptor> +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + } // namespace armnn diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp index 54ae585a82..acb73b589d 100644 --- a/src/backends/backendsCommon/WorkloadFactory.cpp +++ b/src/backends/backendsCommon/WorkloadFactory.cpp @@ -68,15 +68,6 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, switch(layer.GetType()) { - case LayerType::Abs: - { - const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); - const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsAbsSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - reason); - break; - } case LayerType::Activation: { auto cLayer = boost::polymorphic_downcast<const ActivationLayer*>(&layer); @@ -294,6 +285,19 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, reason); break; } + case LayerType::ElementwiseUnary: + { + auto cLayer = boost::polymorphic_downcast<const ElementwiseUnaryLayer*>(&layer); + + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); + + result = layerSupportObject->IsElementwiseUnarySupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); + break; + } case LayerType::FakeQuantization: { auto cLayer = boost::polymorphic_downcast<const FakeQuantizationLayer*>(&layer); @@ -807,15 +811,6 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, reason); break; } - case LayerType::Rsqrt: - { - const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); - const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsRsqrtSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - reason); - break; - } case LayerType::Slice: { auto cLayer = boost::polymorphic_downcast<const SliceLayer*>(&layer); @@ -1182,6 +1177,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueD return std::unique_ptr<IWorkload>(); } +std::unique_ptr<IWorkload> IWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/, + const WorkloadInfo& /*info*/) const +{ + return std::unique_ptr<IWorkload>(); +} + std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*Info*/) 
const { diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp index 6e6478fd6a..e1cdff6abe 100644 --- a/src/backends/backendsCommon/WorkloadFactory.hpp +++ b/src/backends/backendsCommon/WorkloadFactory.hpp @@ -51,6 +51,7 @@ public: DataLayout dataLayout, const bool IsMemoryManaged = true) const = 0; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") virtual std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) const; @@ -105,6 +106,9 @@ public: virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const; + virtual std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, + const WorkloadInfo& Info) const; + ARMNN_DEPRECATED_MSG("Use CreateComparison instead") virtual std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor, const WorkloadInfo& Info) const; @@ -200,6 +204,7 @@ public: virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info) const; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info) const; diff --git a/src/backends/backendsCommon/WorkloadFactoryBase.hpp b/src/backends/backendsCommon/WorkloadFactoryBase.hpp index 1947c6935b..9602cc3b6c 100644 --- a/src/backends/backendsCommon/WorkloadFactoryBase.hpp +++ b/src/backends/backendsCommon/WorkloadFactoryBase.hpp @@ -106,6 +106,22 @@ public: const WorkloadInfo& /*info*/) const override { return nullptr; } + std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, + const WorkloadInfo& info) const override + { + if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs) + { + AbsQueueDescriptor absDescriptor; + return CreateAbs(absDescriptor, info); + } + else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt) + { + RsqrtQueueDescriptor rsqrtDescriptor; + return CreateRsqrt(rsqrtDescriptor, info); + } + return nullptr; + } + std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk index 4461cd68b1..56a21b386c 100644 --- a/src/backends/backendsCommon/common.mk +++ b/src/backends/backendsCommon/common.mk @@ -50,6 +50,7 @@ COMMON_TEST_SOURCES := \ test/layerTests/DepthToSpaceTestImpl.cpp \ test/layerTests/DequantizeTestImpl.cpp \ test/layerTests/DivisionTestImpl.cpp \ + test/layerTests/ElementwiseUnaryTestImpl.cpp \ test/layerTests/FakeQuantizationTestImpl.cpp \ test/layerTests/FloorTestImpl.cpp \ test/layerTests/FullyConnectedTestImpl.cpp \ diff --git a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp deleted file mode 100644 index 602ccd6a19..0000000000 --- a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp +++ /dev/null @@ -1,65 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#pragma once - -#include "CommonTestUtils.hpp" - -#include <QuantizeHelper.hpp> -#include <ResolveType.hpp> - - -namespace -{ - -armnn::INetworkPtr CreateAbsNetwork(const armnn::TensorInfo& tensorInfo) -{ - armnn::INetworkPtr network(armnn::INetwork::Create()); - - armnn::IConnectableLayer* inputLayer = network->AddInputLayer(0, "input"); - armnn::IConnectableLayer* absLayer = network->AddAbsLayer("abs"); - armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0, "output"); - - Connect(inputLayer, absLayer, tensorInfo, 0, 0); - Connect(absLayer, outputLayer, tensorInfo, 0, 0); - - return network; -} - -} // anonymous namespace - -template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>> -void AbsEndToEnd(const std::vector<armnn::BackendId>& backends) -{ - using namespace armnn; - - const float qScale = IsQuantizedType<T>() ? 0.25f : 1.0f; - const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0; - - TensorInfo tensorInfo({ 1, 1, 2, 3 }, ArmnnType, qScale, qOffset); - - std::vector<float> inputData = - { - -1.f, 2.f, -3.f, - 4.f, -5.f, 6.f - }; - - std::vector<float> expectedOutputData = - { - 1.f, 2.f, 3.f, - 4.f, 5.f, 6.f - }; - - // quantize data - std::vector<T> qInputData = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset); - std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset); - - INetworkPtr network = CreateAbsNetwork(tensorInfo); - - EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), - { { 0, qInputData } }, - { { 0, qExpectedOutputData } }, - backends); -} diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt index 82df782317..4716bd47e4 100644 --- a/src/backends/backendsCommon/test/CMakeLists.txt +++ b/src/backends/backendsCommon/test/CMakeLists.txt @@ -4,7 +4,6 @@ # list(APPEND armnnBackendsCommonUnitTests_sources - AbsEndToEndTestImpl.hpp ActivationFixture.hpp ArgMinMaxEndToEndTestImpl.hpp BackendIdTests.cpp @@ -19,6 +18,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources DetectionPostProcessEndToEndTestImpl.hpp DynamicBackendTests.cpp DynamicBackendTests.hpp + ElementwiseUnaryEndToEndTestImpl.hpp EndToEndTestImpl.hpp GatherEndToEndTestImpl.hpp InstanceNormalizationEndToEndTestImpl.cpp @@ -81,6 +81,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources layerTests/DivisionTestImpl.cpp layerTests/DivisionTestImpl.hpp layerTests/ElementwiseTestImpl.hpp + layerTests/ElementwiseUnaryTestImpl.cpp + layerTests/ElementwiseUnaryTestImpl.hpp layerTests/FakeQuantizationTestImpl.cpp layerTests/FakeQuantizationTestImpl.hpp layerTests/FloorTestImpl.cpp diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp new file mode 100644 index 0000000000..4c93735bc8 --- /dev/null +++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp @@ -0,0 +1,77 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// +#pragma once + +#include "CommonTestUtils.hpp" + +#include <ResolveType.hpp> + +#include <armnn/INetwork.hpp> + +#include <boost/test/unit_test.hpp> + +#include <vector> + +namespace +{ + +template<armnn::DataType ArmnnTypeInput> +INetworkPtr CreateElementwiseUnaryNetwork(const TensorShape& inputShape, + const TensorShape& outputShape, + UnaryOperation operation, + const float qScale = 1.0f, + const int32_t qOffset = 0) +{ + using namespace armnn; + + INetworkPtr net(INetwork::Create()); + + ElementwiseUnaryDescriptor descriptor(operation); + IConnectableLayer* elementwiseUnaryLayer = net->AddElementwiseUnaryLayer(descriptor, "elementwiseUnary"); + + TensorInfo inputTensorInfo(inputShape, ArmnnTypeInput, qScale, qOffset); + IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast<LayerBindingId>(0)); + Connect(input, elementwiseUnaryLayer, inputTensorInfo, 0, 0); + + TensorInfo outputTensorInfo(outputShape, ArmnnTypeInput, qScale, qOffset); + IConnectableLayer* output = net->AddOutputLayer(0, "output"); + Connect(elementwiseUnaryLayer, output, outputTensorInfo, 0, 0); + + return net; +} + +template<armnn::DataType ArmnnInType, + typename TInput = armnn::ResolveType<ArmnnInType>> +void ElementwiseUnarySimpleEndToEnd(const std::vector<BackendId>& backends, + UnaryOperation operation, + const std::vector<float> expectedOutput) +{ + using namespace armnn; + + const float qScale = IsQuantizedType<TInput>() ? 0.25f : 1.0f; + const int32_t qOffset = IsQuantizedType<TInput>() ? 50 : 0; + + const TensorShape& inputShape = { 2, 2, 2, 2 }; + const TensorShape& outputShape = { 2, 2, 2, 2 }; + + // Builds up the structure of the network + INetworkPtr net = CreateElementwiseUnaryNetwork<ArmnnInType>(inputShape, outputShape, operation, qScale, qOffset); + + BOOST_TEST_CHECKPOINT("create a network"); + + const std::vector<float> input({ 1, -1, 1, 1, 5, -5, 5, 5, + -3, 3, 3, 3, 4, 4, -4, 4 }); + + // quantize data + std::vector<TInput> qInputData = armnnUtils::QuantizedVector<TInput>(input, qScale, qOffset); + std::vector<TInput> qExpectedOutput = armnnUtils::QuantizedVector<TInput>(expectedOutput, qScale, qOffset); + + std::map<int, std::vector<TInput>> inputTensorData = {{ 0, qInputData }}; + std::map<int, std::vector<TInput>> expectedOutputData = {{ 0, qExpectedOutput }}; + + EndToEndLayerTestImpl<ArmnnInType, ArmnnInType>(move(net), inputTensorData, expectedOutputData, backends); +} + +} // anonymous namespace diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index 031210f1fc..e4ce7407bf 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -427,8 +427,6 @@ template<armnn::LayerType Type, armnn::DataType DataType> struct LayerTypePolicy; // Every entry in the armnn::LayerType enum must be accounted for below. 
-DECLARE_LAYER_POLICY_1_PARAM(Abs) - DECLARE_LAYER_POLICY_2_PARAM(Activation) DECLARE_LAYER_POLICY_1_PARAM(Addition) @@ -465,6 +463,8 @@ DECLARE_LAYER_POLICY_1_PARAM(Dequantize) DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess) +DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary) + DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization) DECLARE_LAYER_POLICY_1_PARAM(Floor) @@ -517,8 +517,6 @@ DECLARE_LAYER_POLICY_2_PARAM(Resize) DECLARE_LAYER_POLICY_2_PARAM(Reshape) -DECLARE_LAYER_POLICY_1_PARAM(Rsqrt) - DECLARE_LAYER_POLICY_2_PARAM(Slice) DECLARE_LAYER_POLICY_2_PARAM(Softmax) diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index 05c307ead2..eba7944cc3 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -22,6 +22,7 @@ #include <backendsCommon/test/layerTests/DequantizeTestImpl.hpp> #include <backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp> #include <backendsCommon/test/layerTests/DivisionTestImpl.hpp> +#include <backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp> #include <backendsCommon/test/layerTests/FakeQuantizationTestImpl.hpp> #include <backendsCommon/test/layerTests/FloorTestImpl.hpp> #include <backendsCommon/test/layerTests/FullyConnectedTestImpl.hpp> diff --git a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp index cc57893439..7706809e8b 100644 --- a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp @@ -4,85 +4,15 @@ // #include "AbsTestImpl.hpp" +#include "ElementwiseUnaryTestImpl.hpp" -#include <backendsCommon/test/DataTypeUtils.hpp> -#include <backendsCommon/test/TensorCopyUtils.hpp> -#include <backendsCommon/test/WorkloadTestUtils.hpp> - -#include <test/TensorHelpers.hpp> - -namespace -{ - -template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>> -LayerTestResult<T, 2> Abs2dTestCommon( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::TensorInfo inputTensorInfo, - const armnn::TensorInfo outputTensorInfo, - const std::vector<float>& inputValues, - const std::vector<float>& expectedOutputValues) -{ - boost::ignore_unused(memoryManager); - auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo)); - - LayerTestResult<T, 2> result(outputTensorInfo); - - result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, - ConvertToDataType<ArmnnType>(expectedOutputValues,outputTensorInfo)); - - std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); - std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); - - armnn::AbsQueueDescriptor descriptor; - - armnn::WorkloadInfo info; - - AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); - AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); - - std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAbs(descriptor, info); - - inputHandle->Allocate(); - outputHandle->Allocate(); - - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - - workload->PostAllocationConfigure(); - workload->Execute(); - - CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get()); - - return result; -} - -} // anonymous namespace - 
template<armnn::DataType ArmnnType, typename T> LayerTestResult<T, 2> Abs2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - const armnn::TensorShape inputShape{ 2, 2 }; - const armnn::TensorShape outputShape{ 2, 2 }; - - float qScale = 0.0625f; - int32_t qOffset = 64; - - if (ArmnnType == armnn::DataType::QSymmS16) - { - qScale = 0.1f; - qOffset = 0; - } - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(qScale); - inputTensorInfo.SetQuantizationOffset(qOffset); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(qScale); - outputTensorInfo.SetQuantizationOffset(qOffset); + const unsigned int inputShape[] = { 2, 2 }; std::vector<float> inputValues { @@ -98,9 +28,14 @@ LayerTestResult<T, 2> Abs2dTest( std::vector<float> expectedOutputValues(inputValues.size()); std::transform(inputValues.begin(), inputValues.end(), expectedOutputValues.begin(), f); - return Abs2dTestCommon<ArmnnType>(workloadFactory, memoryManager, - inputTensorInfo, outputTensorInfo, - inputValues, expectedOutputValues); + return ElementwiseUnaryTestHelper<2, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Abs, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } template<armnn::DataType ArmnnType, typename T> @@ -108,27 +43,7 @@ LayerTestResult<T, 3> Abs3dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - boost::ignore_unused(memoryManager); - - const armnn::TensorShape inputShape{ 3, 1, 2 }; - const armnn::TensorShape outputShape{ 3, 1, 2 }; - - float qScale = 0.0625f; - int32_t qOffset = 64; - - if (ArmnnType == armnn::DataType::QSymmS16) - { - qScale = 0.1f; - qOffset = 0; - } - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(qScale); - inputTensorInfo.SetQuantizationOffset(qOffset); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(qScale); - outputTensorInfo.SetQuantizationOffset(qOffset); + const unsigned int inputShape[] = { 3, 1, 2 }; std::vector<float> inputValues { @@ -143,35 +58,14 @@ LayerTestResult<T, 3> Abs3dTest( std::vector<float>expectedOutputValues(inputValues.size()); std::transform(inputValues.begin(), inputValues.end(), expectedOutputValues.begin(), f); - auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo)); - - LayerTestResult<T, 3> result(outputTensorInfo); - result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, - ConvertToDataType<ArmnnType>(expectedOutputValues,outputTensorInfo)); - - std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); - std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); - - armnn::AbsQueueDescriptor descriptor; - - armnn::WorkloadInfo info; - - AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); - AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); - - std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAbs(descriptor, info); - - inputHandle->Allocate(); - outputHandle->Allocate(); - - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]); - - workload->PostAllocationConfigure(); - workload->Execute(); - - 
CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get()); - - return result; + return ElementwiseUnaryTestHelper<3, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Abs, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } template<armnn::DataType ArmnnType, typename T> @@ -179,14 +73,7 @@ LayerTestResult<T, 2> AbsZeroTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - const armnn::TensorShape inputShape{ 1, 2 }; - const armnn::TensorShape outputShape{ 1, 2 }; - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(0.1f); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(0.1f); + const unsigned int inputShape[] = { 1, 2 }; std::vector<float> inputValues { @@ -198,9 +85,14 @@ LayerTestResult<T, 2> AbsZeroTest( 0.f, 0.f }; - return Abs2dTestCommon<ArmnnType>(workloadFactory, memoryManager, - inputTensorInfo, outputTensorInfo, - inputValues, expectedOutputValues); + return ElementwiseUnaryTestHelper<2, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Abs, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } // diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp index c0a779c0e6..cbbe140843 100644 --- a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp @@ -15,6 +15,7 @@ #include <backendsCommon/WorkloadData.hpp> #include <backendsCommon/WorkloadFactory.hpp> +#include <backendsCommon/test/DataTypeUtils.hpp> #include <backendsCommon/test/TensorCopyUtils.hpp> #include <backendsCommon/test/WorkloadTestUtils.hpp> @@ -202,4 +203,4 @@ LayerTestResult<T, NumDims> ElementwiseTestHelper( outValues, quantScale, quantOffset); -} +}
\ No newline at end of file diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp new file mode 100644 index 0000000000..a2c88a62e7 --- /dev/null +++ b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp @@ -0,0 +1,14 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ElementwiseUnaryTestImpl.hpp" + +std::unique_ptr<armnn::IWorkload> CreateWorkload( + const armnn::IWorkloadFactory& workloadFactory, + const armnn::WorkloadInfo& info, + const armnn::ElementwiseUnaryQueueDescriptor& descriptor) +{ + return workloadFactory.CreateElementwiseUnary(descriptor, info); +}
\ No newline at end of file diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp new file mode 100644 index 0000000000..bea4ec205e --- /dev/null +++ b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp @@ -0,0 +1,113 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "LayerTestResult.hpp" + +#include <armnn/ArmNN.hpp> + +#include <ResolveType.hpp> + +#include <armnn/backends/IBackendInternal.hpp> +#include <backendsCommon/Workload.hpp> +#include <backendsCommon/WorkloadData.hpp> +#include <backendsCommon/WorkloadFactory.hpp> + +#include <backendsCommon/test/DataTypeUtils.hpp> +#include <backendsCommon/test/TensorCopyUtils.hpp> +#include <backendsCommon/test/WorkloadTestUtils.hpp> + +#include <test/TensorHelpers.hpp> + +#include <memory> + +std::unique_ptr<armnn::IWorkload> CreateWorkload( + const armnn::IWorkloadFactory& workloadFactory, + const armnn::WorkloadInfo& info, + const armnn::ElementwiseUnaryQueueDescriptor& descriptor); + +template <std::size_t NumDims, + armnn::DataType ArmnnType, + typename T = armnn::ResolveType<ArmnnType>> +LayerTestResult<T, NumDims> ElementwiseUnaryTestHelper( + armnn::IWorkloadFactory & workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager, + armnn::UnaryOperation op, + const unsigned int shape[NumDims], + std::vector<float> values, + float quantScale, + int quantOffset, + const unsigned int outShape[NumDims], + std::vector<float> outValues, + float outQuantScale, + int outQuantOffset) +{ + armnn::TensorInfo inputTensorInfo{NumDims, shape, ArmnnType}; + armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnType}; + + inputTensorInfo.SetQuantizationScale(quantScale); + inputTensorInfo.SetQuantizationOffset(quantOffset); + + outputTensorInfo.SetQuantizationScale(outQuantScale); + outputTensorInfo.SetQuantizationOffset(outQuantOffset); + + auto input = MakeTensor<T, NumDims>(inputTensorInfo, ConvertToDataType<ArmnnType>(values, inputTensorInfo)); + + LayerTestResult<T, NumDims> ret(outputTensorInfo); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ElementwiseUnaryDescriptor desc(op); + armnn::ElementwiseUnaryQueueDescriptor qDesc; + qDesc.m_Parameters = desc; + armnn::WorkloadInfo info; + AddInputToWorkload(qDesc, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get()); + auto workload = CreateWorkload(workloadFactory, info, qDesc); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), input.origin()); + + workload->PostAllocationConfigure(); + ExecuteWorkload(*workload, memoryManager); + + CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get()); + + ret.outputExpected = MakeTensor<T, NumDims>(outputTensorInfo, ConvertToDataType<ArmnnType>(outValues, + inputTensorInfo)); + return ret; +} + +template <std::size_t NumDims, + armnn::DataType ArmnnType, + typename T = armnn::ResolveType<ArmnnType>> +LayerTestResult<T, NumDims> ElementwiseUnaryTestHelper( + armnn::IWorkloadFactory & workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager, + armnn::UnaryOperation op, + const unsigned int 
shape[NumDims], + std::vector<float> values, + const unsigned int outShape[NumDims], + std::vector<float> outValues, + float quantScale = 1.0f, + int quantOffset = 0) +{ + return ElementwiseUnaryTestHelper<NumDims, ArmnnType>( + workloadFactory, + memoryManager, + op, + shape, + values, + quantScale, + quantOffset, + outShape, + outValues, + quantScale, + quantOffset); +}
\ No newline at end of file diff --git a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp index db928cf2e0..ca423835dc 100644 --- a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp @@ -4,76 +4,15 @@ // #include "ReshapeTestImpl.hpp" +#include "ElementwiseUnaryTestImpl.hpp" -#include <backendsCommon/test/DataTypeUtils.hpp> -#include <backendsCommon/test/TensorCopyUtils.hpp> -#include <backendsCommon/test/WorkloadTestUtils.hpp> - -#include <test/TensorHelpers.hpp> - -namespace -{ - -template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>> -LayerTestResult<T, 2> Rsqrt2dTestCommon( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::TensorInfo inputTensorInfo, - const armnn::TensorInfo outputTensorInfo, - const std::vector<float>& inputValues, - const std::vector<float>& expectedOutputValues) -{ - boost::ignore_unused(memoryManager); - auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo)); - - LayerTestResult<T, 2> result(outputTensorInfo); - - result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, - ConvertToDataType<ArmnnType>(expectedOutputValues,outputTensorInfo)); - - std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); - std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); - - armnn::RsqrtQueueDescriptor descriptor; - - armnn::WorkloadInfo info; - - AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); - AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); - - std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info); - - inputHandle->Allocate(); - outputHandle->Allocate(); - - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - - workload->PostAllocationConfigure(); - workload->Execute(); - - CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get()); - - return result; -} - -} // anonymous namespace - template<armnn::DataType ArmnnType, typename T> LayerTestResult<T, 2> Rsqrt2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - const armnn::TensorShape inputShape{ 2, 2 }; - const armnn::TensorShape outputShape{ 2, 2 }; - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(0.1f); - inputTensorInfo.SetQuantizationOffset(0); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(0.1f); - outputTensorInfo.SetQuantizationOffset(0); + const unsigned int inputShape[] = { 2, 2 }; std::vector<float> inputValues { @@ -87,9 +26,14 @@ LayerTestResult<T, 2> Rsqrt2dTest( 0.25f, 0.2f }; - return Rsqrt2dTestCommon<ArmnnType>(workloadFactory, memoryManager, - inputTensorInfo, outputTensorInfo, - inputValues, expectedOutputValues); + return ElementwiseUnaryTestHelper<2, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Rsqrt, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } template<armnn::DataType ArmnnType, typename T> @@ -97,17 +41,7 @@ LayerTestResult<T, 3> Rsqrt3dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& 
memoryManager) { - boost::ignore_unused(memoryManager); - const armnn::TensorShape inputShape{ 3, 1, 2 }; - const armnn::TensorShape outputShape{ 3, 1, 2 }; - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(0.1f); - inputTensorInfo.SetQuantizationOffset(0); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(0.1f); - outputTensorInfo.SetQuantizationOffset(0); + const unsigned int inputShape[] = { 3, 1, 2 }; std::vector<float> inputValues { @@ -121,35 +55,14 @@ LayerTestResult<T, 3> Rsqrt3dTest( 0.2f, 0.125f, 0.1f }; - auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo)); - - LayerTestResult<T, 3> result(outputTensorInfo); - result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, - ConvertToDataType<ArmnnType>(expectedOutputValues,outputTensorInfo)); - - std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); - std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); - - armnn::RsqrtQueueDescriptor descriptor; - - armnn::WorkloadInfo info; - - AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); - AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); - - std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info); - - inputHandle->Allocate(); - outputHandle->Allocate(); - - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]); - - workload->PostAllocationConfigure(); - workload->Execute(); - - CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get()); - - return result; + return ElementwiseUnaryTestHelper<3, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Rsqrt, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } template<armnn::DataType ArmnnType, typename T> @@ -157,14 +70,7 @@ LayerTestResult<T, 2> RsqrtZeroTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - const armnn::TensorShape inputShape{ 1, 2 }; - const armnn::TensorShape outputShape{ 1, 2 }; - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(0.1f); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(0.1f); + const unsigned int inputShape[] = { 1, 2 }; std::vector<float> inputValues { @@ -176,9 +82,14 @@ LayerTestResult<T, 2> RsqrtZeroTest( INFINITY, -INFINITY }; - return Rsqrt2dTestCommon<ArmnnType>(workloadFactory, memoryManager, - inputTensorInfo, outputTensorInfo, - inputValues, expectedOutputValues); + return ElementwiseUnaryTestHelper<2, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Rsqrt, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } template<armnn::DataType ArmnnType, typename T> @@ -186,16 +97,7 @@ LayerTestResult<T, 2> RsqrtNegativeTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - const armnn::TensorShape inputShape{ 1, 2 }; - const armnn::TensorShape outputShape{ 1, 2 }; - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(0.1f); - inputTensorInfo.SetQuantizationOffset(0); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(0.1f); 
- outputTensorInfo.SetQuantizationOffset(0); + const unsigned int inputShape[] = { 1, 2 }; std::vector<float> inputValues { @@ -207,9 +109,14 @@ LayerTestResult<T, 2> RsqrtNegativeTest( -NAN, -NAN }; - return Rsqrt2dTestCommon<ArmnnType>(workloadFactory, memoryManager, - inputTensorInfo, outputTensorInfo, - inputValues, expectedOutputValues); + return ElementwiseUnaryTestHelper<2, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Rsqrt, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } // diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp index f7129d6035..f8cc5074b3 100644 --- a/src/backends/cl/ClLayerSupport.cpp +++ b/src/backends/cl/ClLayerSupport.cpp @@ -160,10 +160,8 @@ bool ClLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported) const { - FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate, - reasonIfUnsupported, - input, - output); + ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs); + return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported); } bool ClLayerSupport::IsActivationSupported(const TensorInfo& input, @@ -425,6 +423,29 @@ bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0, output); } +bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported) const +{ + if (descriptor.m_Operation == UnaryOperation::Abs) + { + FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate, + reasonIfUnsupported, + input, + output); + } + else if (descriptor.m_Operation == UnaryOperation::Rsqrt) + { + FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate, + reasonIfUnsupported, + input, + output); + } + + return false; +} + bool ClLayerSupport::IsFloorSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported) const @@ -685,7 +706,8 @@ bool ClLayerSupport::IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported) const { - FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate, reasonIfUnsupported, input, output); + ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt); + return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported); } bool ClLayerSupport::IsSliceSupported(const TensorInfo& input, diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp index a21589d555..9371717013 100644 --- a/src/backends/cl/ClLayerSupport.hpp +++ b/src/backends/cl/ClLayerSupport.hpp @@ -12,6 +12,7 @@ namespace armnn class ClLayerSupport : public LayerSupportBase { public: + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsAbsSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; @@ -102,6 +103,11 @@ public: const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + bool IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& ouput, + const ElementwiseUnaryDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + bool IsFloorSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; @@ -223,6 +229,7 @@ public: const TensorInfo& output, 
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp index f9e6632b0c..4bb2e2a8ce 100644 --- a/src/backends/cl/ClWorkloadFactory.cpp +++ b/src/backends/cl/ClWorkloadFactory.cpp @@ -131,7 +131,12 @@ std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorH std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) const { - return MakeWorkload<ClAbsWorkload>(descriptor, info); + boost::ignore_unused(descriptor); + + ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; + elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs); + + return CreateElementwiseUnary(elementwiseUnaryDescriptor, info); } std::unique_ptr<IWorkload> ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor, @@ -249,6 +254,28 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDivision(const DivisionQueue return MakeWorkload<ClDivisionFloatWorkload, NullWorkload>(descriptor, info); } +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs) + { + AbsQueueDescriptor absQueueDescriptor; + absQueueDescriptor.m_Inputs = descriptor.m_Inputs; + absQueueDescriptor.m_Outputs = descriptor.m_Outputs; + + return MakeWorkload<ClAbsWorkload>(absQueueDescriptor, info); + } + else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt) + { + RsqrtQueueDescriptor rsqrtQueueDescriptor; + rsqrtQueueDescriptor.m_Inputs = descriptor.m_Inputs; + rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs; + + return MakeWorkload<ClRsqrtWorkload>(rsqrtQueueDescriptor, info); + } + return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info); +} + std::unique_ptr<IWorkload> ClWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -450,7 +477,12 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateResizeBilinear(const ResizeB std::unique_ptr<IWorkload> ClWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info) const { - return MakeWorkload<ClRsqrtWorkload>(descriptor, info); + boost::ignore_unused(descriptor); + + ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; + elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt); + + return CreateElementwiseUnary(elementwiseUnaryDescriptor, info); } std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor, diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp index 8f377e959d..980be9192e 100644 --- a/src/backends/cl/ClWorkloadFactory.hpp +++ b/src/backends/cl/ClWorkloadFactory.hpp @@ -38,6 +38,7 @@ public: DataLayout dataLayout, const bool IsMemoryManaged = true) const override; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -92,6 +93,9 @@ public: std::unique_ptr<IWorkload> CreateDivision(const 
DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -178,6 +182,7 @@ public: std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info) const override; diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp index d79745c420..92e771760f 100644 --- a/src/backends/cl/test/ClCreateWorkloadTests.cpp +++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp @@ -146,18 +146,16 @@ BOOST_AUTO_TEST_CASE(CreateDivisionFloat16WorkloadTest) armnn::DataType::Float16>(); } -template <typename WorkloadType, +template <typename WorkloadType, typename DescriptorType, - typename LayerType, armnn::DataType DataType> -static void ClCreateElementwiseUnaryWorkloadTest() +static void ClCreateElementwiseUnaryWorkloadTest(armnn::UnaryOperation op) { Graph graph; ClWorkloadFactory factory = ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager()); - auto workload = CreateElementwiseUnaryWorkloadTest - <WorkloadType, DescriptorType, LayerType, DataType>(factory, graph); + auto workload = CreateElementwiseUnaryWorkloadTest<WorkloadType, DescriptorType, DataType>(factory, graph, op); DescriptorType queueDescriptor = workload->GetData(); @@ -170,10 +168,8 @@ static void ClCreateElementwiseUnaryWorkloadTest() BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32WorkloadTest) { - ClCreateElementwiseUnaryWorkloadTest<ClRsqrtWorkload, - RsqrtQueueDescriptor, - RsqrtLayer, - armnn::DataType::Float32>(); + ClCreateElementwiseUnaryWorkloadTest<ClRsqrtWorkload, RsqrtQueueDescriptor, armnn::DataType::Float32>( + UnaryOperation::Rsqrt); } template <typename BatchNormalizationWorkloadType, armnn::DataType DataType> diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp index 260f8f68cd..eafdb7c3e5 100644 --- a/src/backends/cl/test/ClEndToEndTests.cpp +++ b/src/backends/cl/test/ClEndToEndTests.cpp @@ -5,12 +5,12 @@ #include <backendsCommon/test/EndToEndTestImpl.hpp> -#include <backendsCommon/test/AbsEndToEndTestImpl.hpp> #include <backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp> #include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp> #include <backendsCommon/test/ConcatEndToEndTestImpl.hpp> #include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp> #include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp> +#include <backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp> #include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp> #include <backendsCommon/test/PreluEndToEndTestImpl.hpp> #include <backendsCommon/test/QuantizedLstmEndToEndTestImpl.hpp> @@ -27,7 +27,15 @@ std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::GpuAcc}; // Abs BOOST_AUTO_TEST_CASE(ClAbsEndToEndTestFloat32) { - AbsEndToEnd<armnn::DataType::Float32>(defaultBackends); + std::vector<float> expectedOutput = + { + 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f, + 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f + }; + + ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, + 
UnaryOperation::Abs, + expectedOutput); } // Constant diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp index a73837b884..fe9bffbee9 100644 --- a/src/backends/neon/NeonLayerSupport.cpp +++ b/src/backends/neon/NeonLayerSupport.cpp @@ -129,10 +129,8 @@ bool NeonLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported) const { - FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate, - reasonIfUnsupported, - input, - output); + ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs); + return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported); } bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input, @@ -386,6 +384,29 @@ bool NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& biases); } +bool NeonLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported) const +{ + if (descriptor.m_Operation == UnaryOperation::Abs) + { + FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate, + reasonIfUnsupported, + input, + output); + } + else if (descriptor.m_Operation == UnaryOperation::Rsqrt) + { + FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate, + reasonIfUnsupported, + input, + output); + } + + return false; +} + bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported) const @@ -656,7 +677,8 @@ bool NeonLayerSupport::IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported) const { - FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate, reasonIfUnsupported, input, output); + ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt); + return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported); } bool NeonLayerSupport::IsSliceSupported(const TensorInfo& input, diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp index 8e6cd6aded..d429aeceec 100644 --- a/src/backends/neon/NeonLayerSupport.hpp +++ b/src/backends/neon/NeonLayerSupport.hpp @@ -12,6 +12,7 @@ namespace armnn class NeonLayerSupport : public LayerSupportBase { public: + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsAbsSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; @@ -103,6 +104,11 @@ public: const Optional<TensorInfo>& biases, Optional<std::string&> reason = EmptyOptional()) const override; + bool IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + bool IsFloorSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; @@ -224,6 +230,7 @@ public: const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp index 1cc9e50e0b..82f9bdb924 
100644 --- a/src/backends/neon/NeonWorkloadFactory.cpp +++ b/src/backends/neon/NeonWorkloadFactory.cpp @@ -98,7 +98,12 @@ std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const Ten std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) const { - return std::make_unique<NeonAbsWorkload>(descriptor, info); + boost::ignore_unused(descriptor); + + ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; + elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs); + + return CreateElementwiseUnary(elementwiseUnaryDescriptor, info); } std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor, @@ -214,6 +219,29 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision( return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info); } +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& + descriptor, + const WorkloadInfo& info) const +{ + if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs) + { + AbsQueueDescriptor absQueueDescriptor; + absQueueDescriptor.m_Inputs = descriptor.m_Inputs; + absQueueDescriptor.m_Outputs = descriptor.m_Outputs; + + return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info); + } + else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt) + { + RsqrtQueueDescriptor rsqrtQueueDescriptor; + rsqrtQueueDescriptor.m_Inputs = descriptor.m_Inputs; + rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs; + + return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info); + } + return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info); +} + std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -418,7 +446,12 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateResizeBilinear( std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor &descriptor, const WorkloadInfo &info) const { - return std::make_unique<NeonRsqrtWorkload>(descriptor, info); + boost::ignore_unused(descriptor); + + ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; + elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt); + + return CreateElementwiseUnary(elementwiseUnaryDescriptor, info); } std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor, diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp index b76a3a340a..44c0629ece 100644 --- a/src/backends/neon/NeonWorkloadFactory.hpp +++ b/src/backends/neon/NeonWorkloadFactory.hpp @@ -39,6 +39,7 @@ public: DataLayout dataLayout, const bool IsMemoryManaged = true) const override; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -92,6 +93,9 @@ public: std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + + std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, + const WorkloadInfo& Info) const override; ARMNN_DEPRECATED_MSG("Use CreateComparison instead") std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor, @@ -181,6 +185,7 @@ public: 
std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info) const override; diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp index a08c8f7d2a..400a5a38e2 100644 --- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp +++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp @@ -181,36 +181,6 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload) DataType::QAsymmU8>(); } -template <typename WorkloadType, - typename DescriptorType, - typename LayerType, - armnn::DataType DataType> -static void NeonCreateElementwiseUnaryWorkloadTest() -{ - Graph graph; - NeonWorkloadFactory factory = - NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager()); - - auto workload = CreateElementwiseUnaryWorkloadTest - <WorkloadType, DescriptorType, LayerType, DataType>(factory, graph); - - DescriptorType queueDescriptor = workload->GetData(); - - auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]); - auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]); - - BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({2, 3}, DataType))); - BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType))); -} - -BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32Workload) -{ - NeonCreateElementwiseUnaryWorkloadTest<NeonRsqrtWorkload, - RsqrtQueueDescriptor, - RsqrtLayer, - DataType::Float32>(); -} - template <typename BatchNormalizationWorkloadType, typename armnn::DataType DataType> static void NeonCreateBatchNormalizationWorkloadTest(DataLayout dataLayout) { diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp index e1c929b17b..4e9fe0f3c3 100644 --- a/src/backends/neon/test/NeonEndToEndTests.cpp +++ b/src/backends/neon/test/NeonEndToEndTests.cpp @@ -5,13 +5,13 @@ #include <backendsCommon/test/EndToEndTestImpl.hpp> -#include <backendsCommon/test/AbsEndToEndTestImpl.hpp> #include <backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp> #include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp> #include <backendsCommon/test/ConcatEndToEndTestImpl.hpp> #include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp> #include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp> #include <backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp> +#include <backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp> #include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp> #include <backendsCommon/test/PreluEndToEndTestImpl.hpp> #include <backendsCommon/test/QuantizedLstmEndToEndTestImpl.hpp> @@ -28,7 +28,15 @@ std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc}; // Abs BOOST_AUTO_TEST_CASE(NeonAbsEndToEndTestFloat32) { - AbsEndToEnd<armnn::DataType::Float32>(defaultBackends); + std::vector<float> expectedOutput = + { + 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f, + 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f + }; + + ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, + UnaryOperation::Abs, + expectedOutput); } // Constant diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp index 26a61d45d5..491081dbac 100644 --- 
a/src/backends/reference/RefLayerSupport.cpp +++ b/src/backends/reference/RefLayerSupport.cpp @@ -70,28 +70,10 @@ std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected, bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported) const { - bool supported = true; - std::array<DataType,4> supportedTypes = - { - DataType::Float32, - DataType::Float16, - DataType::QAsymmU8, - DataType::QSymmS16 - }; - - supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported, - "Reference abs: input type not supported"); - - supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported, - "Reference abs: output type not supported"); - - supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, - "Reference abs: input and output types not matching"); - - supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported, - "Reference abs: input and output shapes have different number of total elements"); - - return supported; + return IsElementwiseUnarySupported(input, + output, + ElementwiseUnaryDescriptor(UnaryOperation::Abs), + reasonIfUnsupported); } bool RefLayerSupport::IsActivationSupported(const TensorInfo& input, @@ -714,6 +696,39 @@ bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0, return supported; } +bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported) const +{ + boost::ignore_unused(descriptor); + + std::array<DataType, 4> supportedTypes = + { + DataType::Float32, + DataType::Float16, + DataType::QAsymmU8, + DataType::QSymmS16 + }; + + bool supported = true; + + supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported, + "Reference elementwise unary: input type not supported"); + + supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported, + "Reference elementwise unary: output type not supported"); + + supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, + "Reference elementwise unary: input and output types not matching"); + + supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported, + "Reference elementwise unary: input and output shapes " + "have different number of total elements"); + + return supported; +} + bool RefLayerSupport::IsEqualSupported(const TensorInfo& input0, const TensorInfo& input1, const TensorInfo& output, @@ -1499,28 +1514,10 @@ bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported) const { - bool supported = true; - std::array<DataType,4> supportedTypes = - { - DataType::Float32, - DataType::Float16, - DataType::QAsymmU8, - DataType::QSymmS16 - }; - - supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported, - "Reference rsqrt: input type not supported"); - - supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported, - "Reference rsqrt: output type not supported"); - - supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, - "Reference rsqrt: input and output types not matching"); - - supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported, - "Reference Rsqrt: input and output shapes have different number of total elements"); - - return
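Both deprecated reference support queries now collapse into IsElementwiseUnarySupported, which applies the same four rules whatever the operation: input type in the supported set, output type in the supported set, matching types, and matching total element counts. A rough standalone sketch of that rule chain, with hand-rolled stand-ins for TensorInfo and the CheckSupportRule helpers (which are considerably richer in the real code):

#include <algorithm>
#include <array>
#include <iostream>
#include <string>

enum class DataType { Float32, Float16, QAsymmU8, QSymmS16, Signed32 };

// Hypothetical two-field stand-in for armnn::TensorInfo.
struct TensorInfo
{
    DataType     type;
    unsigned int numElements;
};

bool IsElementwiseUnarySupported(const TensorInfo& input, const TensorInfo& output, std::string& reason)
{
    constexpr std::array<DataType, 4> supportedTypes =
        { DataType::Float32, DataType::Float16, DataType::QAsymmU8, DataType::QSymmS16 };

    auto typeOk = [&](DataType t)
    {
        return std::find(supportedTypes.begin(), supportedTypes.end(), t) != supportedTypes.end();
    };

    bool supported = true;
    if (!typeOk(input.type))  { supported = false; reason += "input type not supported; "; }
    if (!typeOk(output.type)) { supported = false; reason += "output type not supported; "; }
    if (input.type != output.type) { supported = false; reason += "input and output types not matching; "; }
    if (input.numElements != output.numElements)
    {
        supported = false;
        reason += "input and output shapes have different number of total elements; ";
    }
    return supported;
}

int main()
{
    std::string reason;
    TensorInfo in  { DataType::Signed32, 6 };
    TensorInfo out { DataType::Float32,  6 };
    std::cout << IsElementwiseUnarySupported(in, out, reason) << " (" << reason << ")\n"; // 0 (...)
}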
supported; + return IsElementwiseUnarySupported(input, + output, + ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), + reasonIfUnsupported); } bool RefLayerSupport::IsSliceSupported(const TensorInfo& input, diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp index a7d6303d86..123c2643df 100644 --- a/src/backends/reference/RefLayerSupport.hpp +++ b/src/backends/reference/RefLayerSupport.hpp @@ -12,6 +12,7 @@ namespace armnn class RefLayerSupport : public LayerSupportBase { public: + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsAbsSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; @@ -117,6 +118,11 @@ public: const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + bool IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead") bool IsEqualSupported(const TensorInfo& input0, const TensorInfo& input1, @@ -247,7 +253,8 @@ public: const TensorInfo& output, const ResizeDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - + + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp index 2db47d35c2..e7a9c19fc7 100644 --- a/src/backends/reference/RefWorkloadFactory.cpp +++ b/src/backends/reference/RefWorkloadFactory.cpp @@ -98,7 +98,11 @@ std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const Tens std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) const { - return std::make_unique<RefAbsWorkload>(descriptor, info); + boost::ignore_unused(descriptor); + ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; + elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Abs; + + return CreateElementwiseUnary(elementwiseUnaryDescriptor, info); } std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor, @@ -221,6 +225,12 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDivision(const DivisionQueu return std::make_unique<RefDivisionWorkload>(descriptor, info); } +std::unique_ptr<IWorkload> RefWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return std::make_unique<RefElementwiseUnaryWorkload>(descriptor, info); +} + std::unique_ptr<IWorkload> RefWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor, const WorkloadInfo& info) const { @@ -463,7 +473,11 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResizeBilinear(const Resize std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info) const { - return std::make_unique<RefRsqrtWorkload>(descriptor, info); + boost::ignore_unused(descriptor); + ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor; + elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Rsqrt; + + return 
CreateElementwiseUnary(elementwiseUnaryDescriptor, info); } std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor, diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp index 80393c3f3a..b5b9b0faf0 100644 --- a/src/backends/reference/RefWorkloadFactory.hpp +++ b/src/backends/reference/RefWorkloadFactory.hpp @@ -59,6 +59,7 @@ public: DataLayout dataLayout, const bool IsMemoryManaged = true) const override; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -113,6 +114,9 @@ public: std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + ARMNN_DEPRECATED_MSG("Use CreateComparison instead") std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor, const WorkloadInfo& info) const override; @@ -204,6 +208,7 @@ public: std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info) const override; diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk index 5f9af59e74..412dc9438c 100644 --- a/src/backends/reference/backend.mk +++ b/src/backends/reference/backend.mk @@ -21,7 +21,6 @@ BACKEND_SOURCES := \ RefWorkloadFactory.cpp \ RefRegistryInitializer.cpp \ RefTensorHandleFactory.cpp \ - workloads/Abs.cpp \ workloads/Activation.cpp \ workloads/ArgMinMax.cpp \ workloads/BatchNormImpl.cpp \ @@ -43,7 +42,6 @@ BACKEND_SOURCES := \ workloads/Pad.cpp \ workloads/Pooling2d.cpp \ workloads/PreluImpl.cpp \ - workloads/RefAbsWorkload.cpp \ workloads/RefActivationWorkload.cpp \ workloads/RefArgMinMaxWorkload.cpp \ workloads/RefBatchNormalizationWorkload.cpp \ @@ -60,6 +58,7 @@ BACKEND_SOURCES := \ workloads/RefDequantizeWorkload.cpp \ workloads/RefDetectionPostProcessWorkload.cpp \ workloads/RefElementwiseWorkload.cpp \ + workloads/RefElementwiseUnaryWorkload.cpp \ workloads/RefFakeQuantizationFloat32Workload.cpp \ workloads/RefFloorWorkload.cpp \ workloads/RefFullyConnectedWorkload.cpp \ @@ -78,7 +77,6 @@ BACKEND_SOURCES := \ workloads/RefReshapeWorkload.cpp \ workloads/RefResizeBilinearWorkload.cpp \ workloads/RefResizeWorkload.cpp \ - workloads/RefRsqrtWorkload.cpp \ workloads/RefSliceWorkload.cpp \ workloads/RefSoftmaxWorkload.cpp \ workloads/RefSpaceToBatchNdWorkload.cpp \ @@ -88,7 +86,6 @@ BACKEND_SOURCES := \ workloads/RefSplitterWorkload.cpp \ workloads/RefTransposeConvolution2dWorkload.cpp \ workloads/Resize.cpp \ - workloads/Rsqrt.cpp \ workloads/Slice.cpp \ workloads/SpaceToBatchNd.cpp \ workloads/SpaceToDepth.cpp \ diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp index 23a8e9b9e9..b83d205970 100644 --- a/src/backends/reference/test/RefCreateWorkloadTests.cpp +++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp @@ -717,41 +717,6 @@ BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc) RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC); } -template 
<typename RsqrtWorkloadType, armnn::DataType DataType> -static void RefCreateRsqrtTest() -{ - Graph graph; - RefWorkloadFactory factory = GetFactory(); - - auto workload = CreateRsqrtWorkloadTest<RsqrtWorkloadType, DataType>(factory, graph); - - // Checks that outputs are as we expect them (see definition of CreateRsqrtWorkloadTest). - CheckInputOutput(std::move(workload), - TensorInfo({ 1, 1 }, DataType), - TensorInfo({ 1, 1 }, DataType)); - -} - -BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32) -{ - RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::Float32>(); -} - -BOOST_AUTO_TEST_CASE(CreateRsqrtFloat16) -{ - RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::Float16>(); -} - -BOOST_AUTO_TEST_CASE(CreateRsqrtUint8) -{ - RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QAsymmU8>(); -} - -BOOST_AUTO_TEST_CASE(CreateRsqrtQsymm16) -{ - RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QSymmS16>(); -} - template <typename BatchToSpaceNdWorkloadType, armnn::DataType DataType> static void RefCreateBatchToSpaceNdTest() { diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp index 75eccdee88..54a68810f6 100644 --- a/src/backends/reference/test/RefEndToEndTests.cpp +++ b/src/backends/reference/test/RefEndToEndTests.cpp @@ -5,7 +5,6 @@ #include <backendsCommon/test/EndToEndTestImpl.hpp> -#include <backendsCommon/test/AbsEndToEndTestImpl.hpp> #include <backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp> #include <backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp> #include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp> @@ -13,6 +12,7 @@ #include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp> #include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp> #include <backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp> +#include <backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp> #include <backendsCommon/test/GatherEndToEndTestImpl.hpp> #include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp> #include <backendsCommon/test/LogSoftmaxEndToEndTestImpl.hpp> @@ -32,17 +32,43 @@ std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuRef}; // Abs BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestFloat32) { - AbsEndToEnd<armnn::DataType::Float32>(defaultBackends); + std::vector<float> expectedOutput = + { + 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f, + 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f + }; + + ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, + UnaryOperation::Abs, + expectedOutput); } BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestUint8) { - AbsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends); + // Note the expected output will be implicitly quantized by the below test function + std::vector<float> expectedOutput = + { + 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f, + 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f + }; + + ElementwiseUnarySimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, + UnaryOperation::Abs, + expectedOutput); } BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestInt16) { - AbsEndToEnd<armnn::DataType::QSymmS16>(defaultBackends); + // Note the expected output will be implicitly quantized by the below test function + std::vector<float> expectedOutput = + { + 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f, + 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f + }; + + ElementwiseUnarySimpleEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, + UnaryOperation::Abs, + expectedOutput); } // Constant diff --git a/src/backends/reference/workloads/Abs.cpp b/src/backends/reference/workloads/Abs.cpp 
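With AbsEndToEndTestImpl.hpp deleted, each backend test now supplies only the expected output; the shared ElementwiseUnarySimpleEndToEnd helper provides the input tensor and, for the quantized variants, quantizes both sides. Assuming the helper's input is a sign-mixed arrangement of these magnitudes (the authoritative values live in ElementwiseUnaryEndToEndTestImpl.hpp), the Float32 expectation can be reproduced directly:

#include <cassert>
#include <cmath>
#include <cstddef>
#include <vector>

int main()
{
    // Assumed input; the real values come from ElementwiseUnaryEndToEndTestImpl.hpp.
    std::vector<float> input =
    {
        -1.f, -1.f,  1.f, 1.f, -5.f,  5.f, -5.f,  5.f,
        -3.f,  3.f, -3.f, 3.f,  4.f,  4.f, -4.f, -4.f
    };

    std::vector<float> expected =
    {
        1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
    };

    for (std::size_t i = 0; i < input.size(); ++i)
    {
        assert(std::abs(input[i]) == expected[i]); // UnaryOperation::Abs, elementwise
    }
}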
deleted file mode 100644 index 6a6a79ca56..0000000000 --- a/src/backends/reference/workloads/Abs.cpp +++ /dev/null @@ -1,23 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "Abs.hpp" - -namespace armnn -{ - -void Abs(Decoder<float>& in, - Encoder<float>& out, - const TensorInfo& tensorInfo) -{ - for (unsigned int i = 0u; i < tensorInfo.GetNumElements(); ++i) - { - out[i]; - in[i]; - out.Set(std::abs(in.Get())); - } -} - -} //namespace armnn diff --git a/src/backends/reference/workloads/Abs.hpp b/src/backends/reference/workloads/Abs.hpp index b1165d2d93..b05f2e3367 100644 --- a/src/backends/reference/workloads/Abs.hpp +++ b/src/backends/reference/workloads/Abs.hpp @@ -1,19 +1,22 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd. All rights reserved. // SPDX-License-Identifier: MIT // -#include "BaseIterator.hpp" -#include <armnn/Tensor.hpp> -#include <armnn/Types.hpp> +#pragma once + +#include <iostream> namespace armnn { - -/// Performs the absolute function elementwise -/// on the inputs to give the outputs. -void Abs(Decoder<float>& in, - Encoder<float>& out, - const TensorInfo& tensorInfo); + template<typename T> +struct abs : public std::unary_function<T, T> + { + T + operator () (const T& inputData) const + { + return std::abs(inputData); + } + }; } //namespace armnn diff --git a/src/backends/reference/workloads/Broadcast.cpp b/src/backends/reference/workloads/Broadcast.cpp index 8421a0a7ed..24af0fc4b1 100644 --- a/src/backends/reference/workloads/Broadcast.cpp +++ b/src/backends/reference/workloads/Broadcast.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd. All rights reserved. // SPDX-License-Identifier: MIT // @@ -30,4 +30,23 @@ BroadcastLoop::BroadcastLoop(const TensorShape& inShape0, const TensorShape& inS } } +BroadcastLoop::BroadcastLoop(const TensorShape& inShape, const TensorShape& outShape) +: m_DimData(outShape.GetNumDimensions()) +{ + const unsigned int numDims = GetNumDimensions(); + + unsigned int sIn = 1; + unsigned int sOut = 1; + + for (unsigned int j = numDims - 1, k = 0; k < numDims ; k++, j--) + { + m_DimData[j].m_DimSize = outShape[j]; + m_DimData[j].m_Stride1 = (inShape[j] > 1) ? sIn : 0; + m_DimData[j].m_StrideOut = sOut; + + sIn *= inShape[j]; + sOut *= outShape[j]; + } +} + } // namespace armnn diff --git a/src/backends/reference/workloads/Broadcast.hpp b/src/backends/reference/workloads/Broadcast.hpp index 5bf6be8939..a3d944ae75 100644 --- a/src/backends/reference/workloads/Broadcast.hpp +++ b/src/backends/reference/workloads/Broadcast.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -15,6 +15,8 @@ struct BroadcastLoop { BroadcastLoop(const TensorShape& inShape0, const TensorShape& inShape1, const TensorShape& outShape); + BroadcastLoop(const TensorShape& inShape, const TensorShape& outShape); + unsigned int GetNumDimensions() { return static_cast<unsigned int>(m_DimData.size()); @@ -56,6 +58,37 @@ struct BroadcastLoop outData -= outDataMovement; } + template <typename Func, typename DecoderOp, typename EncoderOp> + void Unroll(Func operationFunc, + unsigned int dimension, + DecoderOp& inData, + EncoderOp& outData) + { + if (dimension >= GetNumDimensions()) + { + outData.Set(operationFunc(inData.Get())); + return; + } + + unsigned int inDataMovement = 0; + unsigned int outDataMovement = 0; + + for (unsigned int i = 0; i < m_DimData[dimension].m_DimSize; i++) + { + Unroll(operationFunc, dimension + 1, inData, outData); + + inData += m_DimData[dimension].m_Stride1; + outData += m_DimData[dimension].m_StrideOut; + + inDataMovement += m_DimData[dimension].m_Stride1; + outDataMovement += m_DimData[dimension].m_StrideOut; + } + + // move iterator back to the start + inData -= inDataMovement; + outData -= outDataMovement; + } + private: // Struct to hold the dimension data. struct BroadcastDimensionData diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt index dbbdd89fd4..6795204d59 100644 --- a/src/backends/reference/workloads/CMakeLists.txt +++ b/src/backends/reference/workloads/CMakeLists.txt @@ -4,7 +4,6 @@ # list(APPEND armnnRefBackendWorkloads_sources - Abs.cpp Abs.hpp ArgMinMax.cpp ArgMinMax.hpp @@ -33,6 +32,7 @@ list(APPEND armnnRefBackendWorkloads_sources ElementwiseFunction.cpp ElementwiseFunction.hpp Encoders.hpp + Exp.hpp FullyConnected.cpp FullyConnected.hpp Gather.cpp @@ -55,8 +55,6 @@ list(APPEND armnnRefBackendWorkloads_sources Pooling2d.hpp PreluImpl.cpp PreluImpl.hpp - RefAbsWorkload.cpp - RefAbsWorkload.hpp RefActivationWorkload.cpp RefActivationWorkload.hpp RefArgMinMaxWorkload.cpp @@ -89,6 +87,8 @@ list(APPEND armnnRefBackendWorkloads_sources RefDequantizeWorkload.hpp RefDetectionPostProcessWorkload.cpp RefDetectionPostProcessWorkload.hpp + RefElementwiseUnaryWorkload.cpp + RefElementwiseUnaryWorkload.hpp RefFakeQuantizationFloat32Workload.cpp RefFakeQuantizationFloat32Workload.hpp RefFloorWorkload.cpp @@ -125,8 +125,6 @@ list(APPEND armnnRefBackendWorkloads_sources RefResizeBilinearWorkload.hpp RefResizeWorkload.cpp RefResizeWorkload.hpp - RefRsqrtWorkload.cpp - RefRsqrtWorkload.hpp RefSliceWorkload.cpp RefSliceWorkload.hpp RefSoftmaxWorkload.cpp @@ -147,7 +145,6 @@ list(APPEND armnnRefBackendWorkloads_sources RefWorkloadUtils.hpp Resize.cpp Resize.hpp - Rsqrt.cpp Rsqrt.hpp Slice.cpp Slice.hpp @@ -159,6 +156,7 @@ list(APPEND armnnRefBackendWorkloads_sources SpaceToDepth.cpp Splitter.hpp Splitter.cpp + Sqrt.hpp Stack.cpp Stack.hpp StridedSlice.hpp diff --git a/src/backends/reference/workloads/ElementwiseFunction.cpp b/src/backends/reference/workloads/ElementwiseFunction.cpp index 888037f9a6..5687cf5861 100644 --- a/src/backends/reference/workloads/ElementwiseFunction.cpp +++ b/src/backends/reference/workloads/ElementwiseFunction.cpp @@ -7,36 +7,56 @@ #include "Broadcast.hpp" #include <functional> #include "Minimum.hpp" - #include "Maximum.hpp" +#include "Abs.hpp" +#include "Exp.hpp" +#include "Rsqrt.hpp" +#include "Sqrt.hpp" + namespace armnn { template <typename Functor> -ElementwiseFunction<Functor>::ElementwiseFunction(const TensorShape& inShape0, - const 
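The new single-shape BroadcastLoop constructor and the matching Unroll overload reuse the binary broadcast machinery for one input: any input dimension of size 1 gets stride 0, so the same element is re-read while the output index keeps advancing. A condensed sketch of the same stride computation and recursive walk, over raw float buffers instead of Decoder/Encoder iterators:

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Per-dimension data, mirroring BroadcastDimensionData.
struct Dim
{
    unsigned int size;
    unsigned int strideIn;  // 0 when this input dimension is broadcast
    unsigned int strideOut;
};

std::vector<Dim> MakeDims(const std::vector<unsigned int>& inShape,
                          const std::vector<unsigned int>& outShape)
{
    const std::size_t n = outShape.size();
    std::vector<Dim> dims(n);
    unsigned int sIn  = 1;
    unsigned int sOut = 1;
    for (std::size_t k = 0; k < n; ++k) // innermost dimension first
    {
        const std::size_t j = n - 1 - k;
        dims[j] = { outShape[j], (inShape[j] > 1) ? sIn : 0u, sOut };
        sIn  *= inShape[j];
        sOut *= outShape[j];
    }
    return dims;
}

template <typename Func>
void Unroll(const std::vector<Dim>& dims, std::size_t d, const float* in, float* out, Func f)
{
    if (d == dims.size()) { *out = f(*in); return; }
    for (unsigned int i = 0; i < dims[d].size; ++i)
    {
        Unroll(dims, d + 1, in, out, f);
        in  += dims[d].strideIn;
        out += dims[d].strideOut;
    }
}

int main()
{
    // Broadcast a [1,3] input across a [2,3] output while applying rsqrt.
    std::vector<float> in = { 1.f, 4.f, 16.f };
    std::vector<float> out(6);
    Unroll(MakeDims({1, 3}, {2, 3}), 0, in.data(), out.data(),
           [](float x) { return 1.f / std::sqrt(x); });
    for (float v : out) { std::cout << v << ' '; } // 1 0.5 0.25 1 0.5 0.25
    std::cout << '\n';
}

Because the pointers are passed by value at each recursion level, the post-loop rewind that the member-function version needs (inData -= inDataMovement) falls away here.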
TensorShape& inShape1, - const TensorShape& outShape, - armnn::Decoder<InType>& inData0, - armnn::Decoder<InType>& inData1, - armnn::Encoder<OutType>& outData) +ElementwiseBinaryFunction<Functor>::ElementwiseBinaryFunction(const TensorShape& inShape0, + const TensorShape& inShape1, + const TensorShape& outShape, + Decoder<InType>& inData0, + Decoder<InType>& inData1, + Encoder<OutType>& outData) { BroadcastLoop(inShape0, inShape1, outShape).Unroll(Functor(), 0, inData0, inData1, outData); } +template <typename Functor> +ElementwiseUnaryFunction<Functor>::ElementwiseUnaryFunction(const TensorShape& inShape, + const TensorShape& outShape, + Decoder<InType>& inData, + Encoder<OutType>& outData) +{ + BroadcastLoop(inShape, outShape).Unroll(Functor(), 0, inData, outData); +} + } //namespace armnn -template struct armnn::ElementwiseFunction<std::plus<float>>; -template struct armnn::ElementwiseFunction<std::minus<float>>; -template struct armnn::ElementwiseFunction<std::multiplies<float>>; -template struct armnn::ElementwiseFunction<std::divides<float>>; -template struct armnn::ElementwiseFunction<armnn::maximum<float>>; -template struct armnn::ElementwiseFunction<armnn::minimum<float>>; +template struct armnn::ElementwiseBinaryFunction<std::plus<float>>; +template struct armnn::ElementwiseBinaryFunction<std::minus<float>>; +template struct armnn::ElementwiseBinaryFunction<std::multiplies<float>>; +template struct armnn::ElementwiseBinaryFunction<std::divides<float>>; +template struct armnn::ElementwiseBinaryFunction<armnn::maximum<float>>; +template struct armnn::ElementwiseBinaryFunction<armnn::minimum<float>>; // Comparison -template struct armnn::ElementwiseFunction<std::equal_to<float>>; -template struct armnn::ElementwiseFunction<std::greater<float>>; -template struct armnn::ElementwiseFunction<std::greater_equal<float>>; -template struct armnn::ElementwiseFunction<std::less<float>>; -template struct armnn::ElementwiseFunction<std::less_equal<float>>; -template struct armnn::ElementwiseFunction<std::not_equal_to<float>>; +template struct armnn::ElementwiseBinaryFunction<std::equal_to<float>>; +template struct armnn::ElementwiseBinaryFunction<std::greater<float>>; +template struct armnn::ElementwiseBinaryFunction<std::greater_equal<float>>; +template struct armnn::ElementwiseBinaryFunction<std::less<float>>; +template struct armnn::ElementwiseBinaryFunction<std::less_equal<float>>; +template struct armnn::ElementwiseBinaryFunction<std::not_equal_to<float>>; + +// Unary +template struct armnn::ElementwiseUnaryFunction<armnn::abs<float>>; +template struct armnn::ElementwiseUnaryFunction<armnn::exp<float>>; +template struct armnn::ElementwiseUnaryFunction<std::negate<float>>; +template struct armnn::ElementwiseUnaryFunction<armnn::rsqrt<float>>; +template struct armnn::ElementwiseUnaryFunction<armnn::sqrt<float>>; diff --git a/src/backends/reference/workloads/ElementwiseFunction.hpp b/src/backends/reference/workloads/ElementwiseFunction.hpp index fd1fab0690..8259ba5ac7 100644 --- a/src/backends/reference/workloads/ElementwiseFunction.hpp +++ b/src/backends/reference/workloads/ElementwiseFunction.hpp @@ -12,17 +12,29 @@ namespace armnn { template <typename Functor> -struct ElementwiseFunction +struct ElementwiseBinaryFunction { using OutType = typename Functor::result_type; using InType = typename Functor::first_argument_type; - ElementwiseFunction(const TensorShape& inShape0, - const TensorShape& inShape1, - const TensorShape& outShape, - armnn::Decoder<InType>& inData0, - 
armnn::Decoder<InType>& inData1, - armnn::Encoder<OutType>& outData); + ElementwiseBinaryFunction(const TensorShape& inShape0, + const TensorShape& inShape1, + const TensorShape& outShape, + Decoder<InType>& inData0, + Decoder<InType>& inData1, + Encoder<OutType>& outData); +}; + +template <typename Functor> +struct ElementwiseUnaryFunction +{ + using OutType = typename Functor::result_type; + using InType = typename Functor::argument_type; + + ElementwiseUnaryFunction(const TensorShape& inShape, + const TensorShape& outShape, + Decoder<InType>& inData, + Encoder<OutType>& outData); }; } //namespace armnn diff --git a/src/backends/reference/workloads/Exp.hpp b/src/backends/reference/workloads/Exp.hpp new file mode 100644 index 0000000000..1a046728ba --- /dev/null +++ b/src/backends/reference/workloads/Exp.hpp @@ -0,0 +1,22 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include <iostream> + +namespace armnn +{ + template<typename T> +struct exp : public std::unary_function<T, T> + { + T + operator () (const T& inputData) const + { + return std::exp(inputData); + } + }; + +} //namespace armnn diff --git a/src/backends/reference/workloads/RefAbsWorkload.cpp b/src/backends/reference/workloads/RefAbsWorkload.cpp deleted file mode 100644 index 5c1f8c0c69..0000000000 --- a/src/backends/reference/workloads/RefAbsWorkload.cpp +++ /dev/null @@ -1,37 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "RefAbsWorkload.hpp" - -#include "Abs.hpp" -#include "Decoders.hpp" -#include "Encoders.hpp" -#include "RefWorkloadUtils.hpp" - -#include <Profiling.hpp> - -namespace armnn -{ - -void RefAbsWorkload::Execute() const -{ - ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefAbsWorkload_Execute"); - - const TensorInfo& inputTensorInfo = GetTensorInfo(m_Data.m_Inputs[0]); - - std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, m_Data.m_Inputs[0]->Map()); - Decoder<float>& decoder = *decoderPtr; - - const TensorInfo& outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]); - - std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputTensorInfo, m_Data.m_Outputs[0]->Map()); - Encoder<float>& encoder = *encoderPtr; - - Abs(decoder, - encoder, - inputTensorInfo); -} - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefAbsWorkload.hpp b/src/backends/reference/workloads/RefAbsWorkload.hpp deleted file mode 100644 index 68105556d5..0000000000 --- a/src/backends/reference/workloads/RefAbsWorkload.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
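Abs.hpp, Exp.hpp, Rsqrt.hpp and Sqrt.hpp are now header-only functors in the std::unary_function mould, so ElementwiseUnaryFunction can be instantiated uniformly for armnn::abs, armnn::exp, std::negate, armnn::rsqrt and armnn::sqrt. As committed they include <iostream>; a self-contained build would want <cmath> for the math calls and <functional> for std::unary_function, which was deprecated in C++11 and removed in C++17. The functor shape in isolation, spelling out the typedefs that std::unary_function used to provide, applied with std::transform:

#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

// Same shape as the armnn functors: stateless, with the nested
// argument/result typedefs that ElementwiseUnaryFunction reads.
template <typename T>
struct rsqrt
{
    using argument_type = T;
    using result_type   = T;

    T operator()(const T& x) const { return T(1) / std::sqrt(x); }
};

int main()
{
    std::vector<float> v = { 1.f, 4.f, 16.f, 64.f };
    std::transform(v.begin(), v.end(), v.begin(), rsqrt<float>());
    for (float x : v) { std::cout << x << ' '; } // 1 0.5 0.25 0.125
    std::cout << '\n';
}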
-// SPDX-License-Identifier: MIT -// - -#pragma once - -#include <backendsCommon/Workload.hpp> -#include <backendsCommon/WorkloadData.hpp> - -namespace armnn -{ - -class RefAbsWorkload : public BaseWorkload<AbsQueueDescriptor> -{ -public: - using BaseWorkload<AbsQueueDescriptor>::BaseWorkload; - virtual void Execute() const override; -}; - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefComparisonWorkload.cpp b/src/backends/reference/workloads/RefComparisonWorkload.cpp index 60446226be..52ad9a2879 100644 --- a/src/backends/reference/workloads/RefComparisonWorkload.cpp +++ b/src/backends/reference/workloads/RefComparisonWorkload.cpp @@ -52,12 +52,12 @@ void RefComparisonWorkload::Execute() const m_Input1->Reset(m_Data.m_Inputs[1]->Map()); m_Output->Reset(m_Data.m_Outputs[0]->Map()); - using EqualFunction = ElementwiseFunction<std::equal_to<InType>>; - using GreaterFunction = ElementwiseFunction<std::greater<InType>>; - using GreaterOrEqualFunction = ElementwiseFunction<std::greater_equal<InType>>; - using LessFunction = ElementwiseFunction<std::less<InType>>; - using LessOrEqualFunction = ElementwiseFunction<std::less_equal<InType>>; - using NotEqualFunction = ElementwiseFunction<std::not_equal_to<InType>>; + using EqualFunction = ElementwiseBinaryFunction<std::equal_to<InType>>; + using GreaterFunction = ElementwiseBinaryFunction<std::greater<InType>>; + using GreaterOrEqualFunction = ElementwiseBinaryFunction<std::greater_equal<InType>>; + using LessFunction = ElementwiseBinaryFunction<std::less<InType>>; + using LessOrEqualFunction = ElementwiseBinaryFunction<std::less_equal<InType>>; + using NotEqualFunction = ElementwiseBinaryFunction<std::not_equal_to<InType>>; switch (m_Data.m_Parameters.m_Operation) { diff --git a/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp new file mode 100644 index 0000000000..4fbb0d123f --- /dev/null +++ b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp @@ -0,0 +1,95 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "RefElementwiseUnaryWorkload.hpp" + +#include "Decoders.hpp" +#include "ElementwiseFunction.hpp" +#include "Encoders.hpp" +#include "RefWorkloadUtils.hpp" +#include "Abs.hpp" +#include "Exp.hpp" +#include "Rsqrt.hpp" +#include "Sqrt.hpp" + +#include <Profiling.hpp> + +#include <armnn/TypesUtils.hpp> + +#include <functional> + +namespace armnn +{ + +RefElementwiseUnaryWorkload::RefElementwiseUnaryWorkload(const ElementwiseUnaryQueueDescriptor& desc, + const WorkloadInfo& info) + : BaseWorkload<ElementwiseUnaryQueueDescriptor>(desc, info) +{} + +void RefElementwiseUnaryWorkload::PostAllocationConfigure() +{ + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + m_Input = MakeDecoder<InType>(inputInfo); + + m_Output = MakeEncoder<OutType>(outputInfo); +} + +void RefElementwiseUnaryWorkload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefElementwiseUnaryWorkload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + const TensorShape& inShape = inputInfo.GetShape(); + const TensorShape& outShape = outputInfo.GetShape(); + + m_Input->Reset(m_Data.m_Inputs[0]->Map()); + m_Output->Reset(m_Data.m_Outputs[0]->Map()); + + using AbsFunction = ElementwiseUnaryFunction<abs<InType>>; + using ExpFunction = ElementwiseUnaryFunction<exp<InType>>; + using NegFunction = ElementwiseUnaryFunction<std::negate<InType>>; + using RsqrtFunction = ElementwiseUnaryFunction<rsqrt<InType>>; + using SqrtFunction = ElementwiseUnaryFunction<sqrt<InType>>; + + switch (m_Data.m_Parameters.m_Operation) + { + case UnaryOperation::Abs: + { + AbsFunction(inShape, outShape, *m_Input, *m_Output); + break; + } + case UnaryOperation::Exp: + { + ExpFunction(inShape, outShape, *m_Input, *m_Output); + break; + } + case UnaryOperation::Neg: + { + NegFunction(inShape, outShape, *m_Input, *m_Output); + break; + } + case UnaryOperation::Rsqrt: + { + RsqrtFunction(inShape, outShape, *m_Input, *m_Output); + break; + } + case UnaryOperation::Sqrt: + { + SqrtFunction(inShape, outShape, *m_Input, *m_Output); + break; + } + default: + { + throw InvalidArgumentException(std::string("Unsupported unary operation ") + + GetUnaryOperationAsCString(m_Data.m_Parameters.m_Operation), CHECK_LOCATION()); + } + } +} + +} // namespace armnn diff --git a/src/backends/reference/workloads/RefElementwiseUnaryWorkload.hpp b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.hpp new file mode 100644 index 0000000000..efb2865ebd --- /dev/null +++ b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.hpp @@ -0,0 +1,33 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. 
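Execute() maps both tensors, then switches on m_Parameters.m_Operation to pick the functor, throwing InvalidArgumentException for anything it does not recognise. The same dispatch in miniature, with plain float vectors standing in for the Decoder/Encoder plumbing:

#include <cmath>
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

enum class UnaryOperation { Abs, Exp, Neg, Rsqrt, Sqrt };

// Apply the selected operation elementwise; mirrors the Execute() switch.
void ExecuteUnary(UnaryOperation op, const std::vector<float>& in, std::vector<float>& out)
{
    auto apply = [&](auto f)
    {
        for (std::size_t i = 0; i < in.size(); ++i) { out[i] = f(in[i]); }
    };

    switch (op)
    {
        case UnaryOperation::Abs:   apply([](float x) { return std::abs(x); });        break;
        case UnaryOperation::Exp:   apply([](float x) { return std::exp(x); });        break;
        case UnaryOperation::Neg:   apply([](float x) { return -x; });                 break;
        case UnaryOperation::Rsqrt: apply([](float x) { return 1.f / std::sqrt(x); }); break;
        case UnaryOperation::Sqrt:  apply([](float x) { return std::sqrt(x); });       break;
        default: throw std::invalid_argument("Unsupported unary operation");
    }
}

int main()
{
    std::vector<float> in = { -2.f, 1.f, 3.f };
    std::vector<float> out(in.size());
    ExecuteUnary(UnaryOperation::Neg, in, out);
    for (float v : out) { std::cout << v << ' '; } // 2 -1 -3
    std::cout << '\n';
}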
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "BaseIterator.hpp" + +#include <backendsCommon/Workload.hpp> +#include <backendsCommon/WorkloadData.hpp> + +namespace armnn +{ + +class RefElementwiseUnaryWorkload : public BaseWorkload<ElementwiseUnaryQueueDescriptor> +{ +public: + using BaseWorkload<ElementwiseUnaryQueueDescriptor>::m_Data; + + RefElementwiseUnaryWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info); + void PostAllocationConfigure() override; + void Execute() const override; + +private: + using InType = float; + using OutType = float; + + std::unique_ptr<Decoder<InType>> m_Input; + std::unique_ptr<Encoder<OutType>> m_Output; +}; + +} // namespace armnn diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp index 7e02f032ef..18bf0a7ad9 100644 --- a/src/backends/reference/workloads/RefElementwiseWorkload.cpp +++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp @@ -53,12 +53,12 @@ void RefElementwiseWorkload<Functor, ParentDescriptor, DebugString>::Execute() c m_Input1->Reset(m_Data.m_Inputs[1]->Map()); m_Output->Reset(m_Data.m_Outputs[0]->Map()); - ElementwiseFunction<Functor>(inShape0, - inShape1, - outShape, - *m_Input0, - *m_Input1, - *m_Output); + ElementwiseBinaryFunction<Functor>(inShape0, + inShape1, + outShape, + *m_Input0, + *m_Input1, + *m_Output); } } //namespace armnn diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.hpp b/src/backends/reference/workloads/RefElementwiseWorkload.hpp index ee0d80b172..264ddce2de 100644 --- a/src/backends/reference/workloads/RefElementwiseWorkload.hpp +++ b/src/backends/reference/workloads/RefElementwiseWorkload.hpp @@ -21,8 +21,8 @@ template <typename Functor, typename ParentDescriptor, typename armnn::StringMap class RefElementwiseWorkload : public BaseWorkload<ParentDescriptor> { public: - using InType = typename ElementwiseFunction<Functor>::InType; - using OutType = typename ElementwiseFunction<Functor>::OutType; + using InType = typename ElementwiseBinaryFunction<Functor>::InType; + using OutType = typename ElementwiseBinaryFunction<Functor>::OutType; using BaseWorkload<ParentDescriptor>::m_Data; RefElementwiseWorkload(const ParentDescriptor& descriptor, const WorkloadInfo& info); diff --git a/src/backends/reference/workloads/RefRsqrtWorkload.cpp b/src/backends/reference/workloads/RefRsqrtWorkload.cpp deleted file mode 100644 index fd6b9a3549..0000000000 --- a/src/backends/reference/workloads/RefRsqrtWorkload.cpp +++ /dev/null @@ -1,37 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#include "RefRsqrtWorkload.hpp" - -#include "Decoders.hpp" -#include "Encoders.hpp" -#include "RefWorkloadUtils.hpp" -#include "Rsqrt.hpp" - -#include <Profiling.hpp> - -namespace armnn -{ - -void RefRsqrtWorkload::Execute() const -{ - ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefRsqrtWorkload_Execute"); - - const TensorInfo& inputTensorInfo = GetTensorInfo(m_Data.m_Inputs[0]); - - std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, m_Data.m_Inputs[0]->Map()); - Decoder<float>& decoder = *decoderPtr; - - const TensorInfo& outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]); - - std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputTensorInfo, m_Data.m_Outputs[0]->Map()); - Encoder<float>& encoder = *encoderPtr; - - Rsqrt(decoder, - encoder, - GetTensorInfo(m_Data.m_Inputs[0])); -} - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefRsqrtWorkload.hpp b/src/backends/reference/workloads/RefRsqrtWorkload.hpp deleted file mode 100644 index 6c8ad5bc60..0000000000 --- a/src/backends/reference/workloads/RefRsqrtWorkload.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#pragma once - -#include <backendsCommon/Workload.hpp> -#include <backendsCommon/WorkloadData.hpp> - -namespace armnn -{ - -class RefRsqrtWorkload : public BaseWorkload<RsqrtQueueDescriptor> -{ -public: - using BaseWorkload<RsqrtQueueDescriptor>::BaseWorkload; - virtual void Execute() const override; -}; - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp index 1f9ad4a19a..7034b67aa5 100644 --- a/src/backends/reference/workloads/RefWorkloads.hpp +++ b/src/backends/reference/workloads/RefWorkloads.hpp @@ -5,7 +5,6 @@ #pragma once -#include "Abs.hpp" #include "Activation.hpp" #include "ArgMinMax.hpp" #include "BatchNormImpl.hpp" @@ -15,7 +14,6 @@ #include "FullyConnected.hpp" #include "Gather.hpp" #include "Pooling2d.hpp" -#include "RefAbsWorkload.hpp" #include "RefActivationWorkload.hpp" #include "RefArgMinMaxWorkload.hpp" #include "RefBatchNormalizationWorkload.hpp" @@ -33,6 +31,7 @@ #include "RefDetectionPostProcessWorkload.hpp" #include "RefDequantizeWorkload.hpp" #include "RefElementwiseWorkload.hpp" +#include "RefElementwiseUnaryWorkload.hpp" #include "RefFullyConnectedWorkload.hpp" #include "RefFloorWorkload.hpp" #include "RefFakeQuantizationFloat32Workload.hpp" @@ -51,7 +50,6 @@ #include "RefReshapeWorkload.hpp" #include "RefResizeBilinearWorkload.hpp" #include "RefResizeWorkload.hpp" -#include "RefRsqrtWorkload.hpp" #include "RefSliceWorkload.hpp" #include "RefSplitterWorkload.hpp" #include "RefSoftmaxWorkload.hpp" diff --git a/src/backends/reference/workloads/Rsqrt.cpp b/src/backends/reference/workloads/Rsqrt.cpp deleted file mode 100644 index 5abc2c8f7b..0000000000 --- a/src/backends/reference/workloads/Rsqrt.cpp +++ /dev/null @@ -1,25 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "Rsqrt.hpp" - -#include <cmath> - -namespace armnn -{ - -void Rsqrt(Decoder<float>& in, - Encoder<float>& out, - const TensorInfo& tensorInfo) -{ - for (unsigned int i = 0; i < tensorInfo.GetNumElements(); ++i) - { - out[i]; - in[i]; - out.Set(1.f / sqrtf(in.Get())); - } -} - -} //namespace armnn
\ No newline at end of file diff --git a/src/backends/reference/workloads/Rsqrt.hpp b/src/backends/reference/workloads/Rsqrt.hpp index ffc6b18d13..47ebcf36f6 100644 --- a/src/backends/reference/workloads/Rsqrt.hpp +++ b/src/backends/reference/workloads/Rsqrt.hpp @@ -1,19 +1,22 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2019 Arm Ltd. All rights reserved. // SPDX-License-Identifier: MIT // -#include "BaseIterator.hpp" -#include <armnn/Tensor.hpp> -#include <armnn/Types.hpp> +#pragma once + +#include <iostream> namespace armnn { - -/// Performs the reciprocal squareroot function elementwise -/// on the inputs to give the outputs. -void Rsqrt(Decoder<float>& in, - Encoder<float>& out, - const TensorInfo& tensorInfo); + template<typename T> +struct rsqrt : public std::unary_function<T, T> + { + T + operator () (const T& inputData) const + { + return 1 / std::sqrt(inputData); + } + }; } //namespace armnn diff --git a/src/backends/reference/workloads/Sqrt.hpp b/src/backends/reference/workloads/Sqrt.hpp new file mode 100644 index 0000000000..e4ff6a4829 --- /dev/null +++ b/src/backends/reference/workloads/Sqrt.hpp @@ -0,0 +1,22 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include <iostream> + +namespace armnn +{ + template<typename T> +struct sqrt : public std::unary_function<T, T> + { + T + operator () (const T& inputData) const + { + return std::sqrt(inputData); + } + }; + +} //namespace armnn
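Taken together with the frontend half of IVGCVSW-4259, application code now reaches every one of these workloads through a single layer type. A hedged usage sketch, assuming the INetwork::AddElementwiseUnaryLayer entry point introduced alongside this backend work:

#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    INetworkPtr net = INetwork::Create();
    TensorInfo info({ 2, 3 }, DataType::Float32);

    IConnectableLayer* input  = net->AddInputLayer(0);
    IConnectableLayer* rsqrt  = net->AddElementwiseUnaryLayer(
        ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), "rsqrt");
    IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(rsqrt->GetInputSlot(0));
    rsqrt->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(info);
    rsqrt->GetOutputSlot(0).SetTensorInfo(info);

    // Optimize for the reference backend; loading and running via IRuntime is unchanged.
    IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());
    IOptimizedNetworkPtr optimized = Optimize(*net, { Compute::CpuRef }, runtime->GetDeviceSpec());
    return optimized ? 0 : 1;
}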