From 4a3c61091037e7e86e8b03bb060d8c1ab82731a9 Mon Sep 17 00:00:00 2001 From: josh minor Date: Mon, 6 Jan 2020 16:40:46 -0600 Subject: IVGCVSW-4259 Add frontend and reference workload for UnaryOperationLayer * Added new layer named ElementwiseUnary * Deprecated existing Abs/Rsqrt layer functions * Updated existing Abs/Rsqrt test infrastructure to use new layer * Added boilerplate for new Exp,Neg,Sqrt elemwise op layers * AbsQuantize test removed pending future commit * Serialization support added !android-nn-driver:2550 Change-Id: Ic595c645925e17b45db568187fd05646daf2e87f Signed-off-by: josh minor --- src/backends/backendsCommon/LayerSupportBase.cpp | 23 +++ src/backends/backendsCommon/LayerSupportBase.hpp | 7 + src/backends/backendsCommon/WorkloadData.cpp | 24 +++ src/backends/backendsCommon/WorkloadData.hpp | 5 + src/backends/backendsCommon/WorkloadFactory.cpp | 37 ++--- src/backends/backendsCommon/WorkloadFactory.hpp | 5 + .../backendsCommon/WorkloadFactoryBase.hpp | 16 ++ src/backends/backendsCommon/common.mk | 1 + .../backendsCommon/test/AbsEndToEndTestImpl.hpp | 65 -------- src/backends/backendsCommon/test/CMakeLists.txt | 4 +- .../test/ElementwiseUnaryEndToEndTestImpl.hpp | 77 ++++++++++ .../test/IsLayerSupportedTestImpl.hpp | 6 +- src/backends/backendsCommon/test/LayerTests.hpp | 1 + .../backendsCommon/test/layerTests/AbsTestImpl.cpp | 164 ++++---------------- .../test/layerTests/ElementwiseTestImpl.hpp | 3 +- .../test/layerTests/ElementwiseUnaryTestImpl.cpp | 14 ++ .../test/layerTests/ElementwiseUnaryTestImpl.hpp | 113 ++++++++++++++ .../test/layerTests/RsqrtTestImpl.cpp | 167 +++++---------------- 18 files changed, 377 insertions(+), 355 deletions(-) delete mode 100644 src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp create mode 100644 src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp create mode 100644 src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp create mode 100644 src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp (limited to 'src/backends/backendsCommon') diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp index 8332774202..b19356f955 100644 --- a/src/backends/backendsCommon/LayerSupportBase.cpp +++ b/src/backends/backendsCommon/LayerSupportBase.cpp @@ -3,7 +3,10 @@ // SPDX-License-Identifier: MIT // +#include +#include #include +#include #include @@ -195,6 +198,26 @@ bool LayerSupportBase::IsDivisionSupported(const TensorInfo& /*input0*/, return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); } +bool LayerSupportBase::IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional reasonIfUnsupported) const +{ + if (descriptor.m_Operation == UnaryOperation::Abs) + { + ARMNN_NO_DEPRECATE_WARN_BEGIN + return IsAbsSupported(input, output, reasonIfUnsupported); + ARMNN_NO_DEPRECATE_WARN_END + } + else if (descriptor.m_Operation == UnaryOperation::Rsqrt) + { + ARMNN_NO_DEPRECATE_WARN_BEGIN + return IsRsqrtSupported(input, output, reasonIfUnsupported); + ARMNN_NO_DEPRECATE_WARN_END + } + return false; +} + bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo& /*input0*/, const armnn::TensorInfo& /*input1*/, const armnn::TensorInfo& /*output*/, diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp index 60f94d0c4d..7a65eb55ed 100644 --- 
a/src/backends/backendsCommon/LayerSupportBase.hpp +++ b/src/backends/backendsCommon/LayerSupportBase.hpp @@ -13,6 +13,7 @@ namespace armnn class LayerSupportBase : public ILayerSupport { public: + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsAbsSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; @@ -119,6 +120,11 @@ public: const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; + bool IsElementwiseUnarySupported(const TensorInfo& input, + const TensorInfo& output, + const ElementwiseUnaryDescriptor& descriptor, + Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead") bool IsEqualSupported(const TensorInfo& input0, const TensorInfo& input1, @@ -278,6 +284,7 @@ public: const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; + ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead") bool IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index fa5c6fe38e..d2ab41ef40 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -2969,4 +2969,28 @@ void ComparisonQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const } } +void ElementwiseUnaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + const std::string descriptorName{"ElementwiseUnaryQueueDescriptor"}; + + ValidateNumInputs(workloadInfo, descriptorName, 1); + ValidateNumOutputs(workloadInfo, descriptorName, 1); + + const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0]; + const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0]; + + ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output"); + + std::vector supportedTypes = + { + DataType::Float16, + DataType::Float32, + DataType::QAsymmU8, + DataType::QSymmS16 + }; + + ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName); + ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output"); +} + } // namespace armnn diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp index 43be3cd6e1..c5fcf15c3b 100644 --- a/src/backends/backendsCommon/WorkloadData.hpp +++ b/src/backends/backendsCommon/WorkloadData.hpp @@ -560,4 +560,9 @@ struct ComparisonQueueDescriptor : QueueDescriptorWithParameters +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + } // namespace armnn diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp index 54ae585a82..acb73b589d 100644 --- a/src/backends/backendsCommon/WorkloadFactory.cpp +++ b/src/backends/backendsCommon/WorkloadFactory.cpp @@ -68,15 +68,6 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, switch(layer.GetType()) { - case LayerType::Abs: - { - const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); - const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsAbsSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - reason); - break; - } case LayerType::Activation: { auto cLayer = 
boost::polymorphic_downcast(&layer); @@ -294,6 +285,19 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, reason); break; } + case LayerType::ElementwiseUnary: + { + auto cLayer = boost::polymorphic_downcast(&layer); + + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); + + result = layerSupportObject->IsElementwiseUnarySupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); + break; + } case LayerType::FakeQuantization: { auto cLayer = boost::polymorphic_downcast(&layer); @@ -807,15 +811,6 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, reason); break; } - case LayerType::Rsqrt: - { - const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); - const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject->IsRsqrtSupported(OverrideDataType(input, dataType), - OverrideDataType(output, dataType), - reason); - break; - } case LayerType::Slice: { auto cLayer = boost::polymorphic_downcast(&layer); @@ -1182,6 +1177,12 @@ std::unique_ptr IWorkloadFactory::CreateDivision(const DivisionQueueD return std::unique_ptr(); } +std::unique_ptr IWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/, + const WorkloadInfo& /*info*/) const +{ + return std::unique_ptr(); +} + std::unique_ptr IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*Info*/) const { diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp index 6e6478fd6a..e1cdff6abe 100644 --- a/src/backends/backendsCommon/WorkloadFactory.hpp +++ b/src/backends/backendsCommon/WorkloadFactory.hpp @@ -51,6 +51,7 @@ public: DataLayout dataLayout, const bool IsMemoryManaged = true) const = 0; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") virtual std::unique_ptr CreateAbs(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) const; @@ -105,6 +106,9 @@ public: virtual std::unique_ptr CreateDivision(const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const; + virtual std::unique_ptr CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, + const WorkloadInfo& Info) const; + ARMNN_DEPRECATED_MSG("Use CreateComparison instead") virtual std::unique_ptr CreateEqual(const EqualQueueDescriptor& descriptor, const WorkloadInfo& Info) const; @@ -200,6 +204,7 @@ public: virtual std::unique_ptr CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info) const; + ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead") virtual std::unique_ptr CreateRsqrt(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info) const; diff --git a/src/backends/backendsCommon/WorkloadFactoryBase.hpp b/src/backends/backendsCommon/WorkloadFactoryBase.hpp index 1947c6935b..9602cc3b6c 100644 --- a/src/backends/backendsCommon/WorkloadFactoryBase.hpp +++ b/src/backends/backendsCommon/WorkloadFactoryBase.hpp @@ -106,6 +106,22 @@ public: const WorkloadInfo& /*info*/) const override { return nullptr; } + std::unique_ptr CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor, + const WorkloadInfo& info) const override + { + if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs) + { + AbsQueueDescriptor absDescriptor; + return CreateAbs(absDescriptor, info); + } + else if 
(descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt) + { + RsqrtQueueDescriptor rsqrtDescriptor; + return CreateRsqrt(rsqrtDescriptor, info); + } + return nullptr; + } + std::unique_ptr CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const override { return nullptr; } diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk index 4461cd68b1..56a21b386c 100644 --- a/src/backends/backendsCommon/common.mk +++ b/src/backends/backendsCommon/common.mk @@ -50,6 +50,7 @@ COMMON_TEST_SOURCES := \ test/layerTests/DepthToSpaceTestImpl.cpp \ test/layerTests/DequantizeTestImpl.cpp \ test/layerTests/DivisionTestImpl.cpp \ + test/layerTests/ElementwiseUnaryTestImpl.cpp \ test/layerTests/FakeQuantizationTestImpl.cpp \ test/layerTests/FloorTestImpl.cpp \ test/layerTests/FullyConnectedTestImpl.cpp \ diff --git a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp deleted file mode 100644 index 602ccd6a19..0000000000 --- a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp +++ /dev/null @@ -1,65 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#pragma once - -#include "CommonTestUtils.hpp" - -#include -#include - - -namespace -{ - -armnn::INetworkPtr CreateAbsNetwork(const armnn::TensorInfo& tensorInfo) -{ - armnn::INetworkPtr network(armnn::INetwork::Create()); - - armnn::IConnectableLayer* inputLayer = network->AddInputLayer(0, "input"); - armnn::IConnectableLayer* absLayer = network->AddAbsLayer("abs"); - armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0, "output"); - - Connect(inputLayer, absLayer, tensorInfo, 0, 0); - Connect(absLayer, outputLayer, tensorInfo, 0, 0); - - return network; -} - -} // anonymous namespace - -template> -void AbsEndToEnd(const std::vector& backends) -{ - using namespace armnn; - - const float qScale = IsQuantizedType() ? 0.25f : 1.0f; - const int32_t qOffset = IsQuantizedType() ? 
50 : 0; - - TensorInfo tensorInfo({ 1, 1, 2, 3 }, ArmnnType, qScale, qOffset); - - std::vector inputData = - { - -1.f, 2.f, -3.f, - 4.f, -5.f, 6.f - }; - - std::vector expectedOutputData = - { - 1.f, 2.f, 3.f, - 4.f, 5.f, 6.f - }; - - // quantize data - std::vector qInputData = armnnUtils::QuantizedVector(inputData, qScale, qOffset); - std::vector qExpectedOutputData = armnnUtils::QuantizedVector(expectedOutputData, qScale, qOffset); - - INetworkPtr network = CreateAbsNetwork(tensorInfo); - - EndToEndLayerTestImpl(std::move(network), - { { 0, qInputData } }, - { { 0, qExpectedOutputData } }, - backends); -} diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt index 82df782317..4716bd47e4 100644 --- a/src/backends/backendsCommon/test/CMakeLists.txt +++ b/src/backends/backendsCommon/test/CMakeLists.txt @@ -4,7 +4,6 @@ # list(APPEND armnnBackendsCommonUnitTests_sources - AbsEndToEndTestImpl.hpp ActivationFixture.hpp ArgMinMaxEndToEndTestImpl.hpp BackendIdTests.cpp @@ -19,6 +18,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources DetectionPostProcessEndToEndTestImpl.hpp DynamicBackendTests.cpp DynamicBackendTests.hpp + ElementwiseUnaryEndToEndTestImpl.hpp EndToEndTestImpl.hpp GatherEndToEndTestImpl.hpp InstanceNormalizationEndToEndTestImpl.cpp @@ -81,6 +81,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources layerTests/DivisionTestImpl.cpp layerTests/DivisionTestImpl.hpp layerTests/ElementwiseTestImpl.hpp + layerTests/ElementwiseUnaryTestImpl.cpp + layerTests/ElementwiseUnaryTestImpl.hpp layerTests/FakeQuantizationTestImpl.cpp layerTests/FakeQuantizationTestImpl.hpp layerTests/FloorTestImpl.cpp diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp new file mode 100644 index 0000000000..4c93735bc8 --- /dev/null +++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp @@ -0,0 +1,77 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include "CommonTestUtils.hpp" + +#include + +#include + +#include + +#include + +namespace +{ + +template +INetworkPtr CreateElementwiseUnaryNetwork(const TensorShape& inputShape, + const TensorShape& outputShape, + UnaryOperation operation, + const float qScale = 1.0f, + const int32_t qOffset = 0) +{ + using namespace armnn; + + INetworkPtr net(INetwork::Create()); + + ElementwiseUnaryDescriptor descriptor(operation); + IConnectableLayer* elementwiseUnaryLayer = net->AddElementwiseUnaryLayer(descriptor, "elementwiseUnary"); + + TensorInfo inputTensorInfo(inputShape, ArmnnTypeInput, qScale, qOffset); + IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast(0)); + Connect(input, elementwiseUnaryLayer, inputTensorInfo, 0, 0); + + TensorInfo outputTensorInfo(outputShape, ArmnnTypeInput, qScale, qOffset); + IConnectableLayer* output = net->AddOutputLayer(0, "output"); + Connect(elementwiseUnaryLayer, output, outputTensorInfo, 0, 0); + + return net; +} + +template> +void ElementwiseUnarySimpleEndToEnd(const std::vector& backends, + UnaryOperation operation, + const std::vector expectedOutput) +{ + using namespace armnn; + + const float qScale = IsQuantizedType() ? 0.25f : 1.0f; + const int32_t qOffset = IsQuantizedType() ? 
50 : 0; + + const TensorShape& inputShape = { 2, 2, 2, 2 }; + const TensorShape& outputShape = { 2, 2, 2, 2 }; + + // Builds up the structure of the network + INetworkPtr net = CreateElementwiseUnaryNetwork(inputShape, outputShape, operation, qScale, qOffset); + + BOOST_TEST_CHECKPOINT("create a network"); + + const std::vector input({ 1, -1, 1, 1, 5, -5, 5, 5, + -3, 3, 3, 3, 4, 4, -4, 4 }); + + // quantize data + std::vector qInputData = armnnUtils::QuantizedVector(input, qScale, qOffset); + std::vector qExpectedOutput = armnnUtils::QuantizedVector(expectedOutput, qScale, qOffset); + + std::map> inputTensorData = {{ 0, qInputData }}; + std::map> expectedOutputData = {{ 0, qExpectedOutput }}; + + EndToEndLayerTestImpl(move(net), inputTensorData, expectedOutputData, backends); +} + +} // anonymous namespace diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index 031210f1fc..e4ce7407bf 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -427,8 +427,6 @@ template struct LayerTypePolicy; // Every entry in the armnn::LayerType enum must be accounted for below. -DECLARE_LAYER_POLICY_1_PARAM(Abs) - DECLARE_LAYER_POLICY_2_PARAM(Activation) DECLARE_LAYER_POLICY_1_PARAM(Addition) @@ -465,6 +463,8 @@ DECLARE_LAYER_POLICY_1_PARAM(Dequantize) DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess) +DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary) + DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization) DECLARE_LAYER_POLICY_1_PARAM(Floor) @@ -517,8 +517,6 @@ DECLARE_LAYER_POLICY_2_PARAM(Resize) DECLARE_LAYER_POLICY_2_PARAM(Reshape) -DECLARE_LAYER_POLICY_1_PARAM(Rsqrt) - DECLARE_LAYER_POLICY_2_PARAM(Slice) DECLARE_LAYER_POLICY_2_PARAM(Softmax) diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index 05c307ead2..eba7944cc3 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp index cc57893439..7706809e8b 100644 --- a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp @@ -4,85 +4,15 @@ // #include "AbsTestImpl.hpp" +#include "ElementwiseUnaryTestImpl.hpp" -#include -#include -#include - -#include - -namespace -{ - -template> -LayerTestResult Abs2dTestCommon( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::TensorInfo inputTensorInfo, - const armnn::TensorInfo outputTensorInfo, - const std::vector& inputValues, - const std::vector& expectedOutputValues) -{ - boost::ignore_unused(memoryManager); - auto inputTensor = MakeTensor(inputTensorInfo, ConvertToDataType(inputValues,inputTensorInfo)); - - LayerTestResult result(outputTensorInfo); - - result.outputExpected = MakeTensor(outputTensorInfo, - ConvertToDataType(expectedOutputValues,outputTensorInfo)); - - std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); - std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); - - armnn::AbsQueueDescriptor descriptor; - - armnn::WorkloadInfo info; - - AddInputToWorkload(descriptor, info, inputTensorInfo, 
inputHandle.get()); - AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); - - std::unique_ptr workload = workloadFactory.CreateAbs(descriptor, info); - - inputHandle->Allocate(); - outputHandle->Allocate(); - - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - - workload->PostAllocationConfigure(); - workload->Execute(); - - CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get()); - - return result; -} - -} // anonymous namespace - template LayerTestResult Abs2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - const armnn::TensorShape inputShape{ 2, 2 }; - const armnn::TensorShape outputShape{ 2, 2 }; - - float qScale = 0.0625f; - int32_t qOffset = 64; - - if (ArmnnType == armnn::DataType::QSymmS16) - { - qScale = 0.1f; - qOffset = 0; - } - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(qScale); - inputTensorInfo.SetQuantizationOffset(qOffset); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(qScale); - outputTensorInfo.SetQuantizationOffset(qOffset); + const unsigned int inputShape[] = { 2, 2 }; std::vector inputValues { @@ -98,9 +28,14 @@ LayerTestResult Abs2dTest( std::vector expectedOutputValues(inputValues.size()); std::transform(inputValues.begin(), inputValues.end(), expectedOutputValues.begin(), f); - return Abs2dTestCommon(workloadFactory, memoryManager, - inputTensorInfo, outputTensorInfo, - inputValues, expectedOutputValues); + return ElementwiseUnaryTestHelper<2, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Abs, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } template @@ -108,27 +43,7 @@ LayerTestResult Abs3dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - boost::ignore_unused(memoryManager); - - const armnn::TensorShape inputShape{ 3, 1, 2 }; - const armnn::TensorShape outputShape{ 3, 1, 2 }; - - float qScale = 0.0625f; - int32_t qOffset = 64; - - if (ArmnnType == armnn::DataType::QSymmS16) - { - qScale = 0.1f; - qOffset = 0; - } - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(qScale); - inputTensorInfo.SetQuantizationOffset(qOffset); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(qScale); - outputTensorInfo.SetQuantizationOffset(qOffset); + const unsigned int inputShape[] = { 3, 1, 2 }; std::vector inputValues { @@ -143,35 +58,14 @@ LayerTestResult Abs3dTest( std::vectorexpectedOutputValues(inputValues.size()); std::transform(inputValues.begin(), inputValues.end(), expectedOutputValues.begin(), f); - auto inputTensor = MakeTensor(inputTensorInfo, ConvertToDataType(inputValues,inputTensorInfo)); - - LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, - ConvertToDataType(expectedOutputValues,outputTensorInfo)); - - std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); - std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); - - armnn::AbsQueueDescriptor descriptor; - - armnn::WorkloadInfo info; - - AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); - AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); - - std::unique_ptr workload = workloadFactory.CreateAbs(descriptor, 
info); - - inputHandle->Allocate(); - outputHandle->Allocate(); - - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]); - - workload->PostAllocationConfigure(); - workload->Execute(); - - CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get()); - - return result; + return ElementwiseUnaryTestHelper<3, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Abs, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } template @@ -179,14 +73,7 @@ LayerTestResult AbsZeroTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - const armnn::TensorShape inputShape{ 1, 2 }; - const armnn::TensorShape outputShape{ 1, 2 }; - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(0.1f); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(0.1f); + const unsigned int inputShape[] = { 1, 2 }; std::vector inputValues { @@ -198,9 +85,14 @@ LayerTestResult AbsZeroTest( 0.f, 0.f }; - return Abs2dTestCommon(workloadFactory, memoryManager, - inputTensorInfo, outputTensorInfo, - inputValues, expectedOutputValues); + return ElementwiseUnaryTestHelper<2, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Abs, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } // diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp index c0a779c0e6..cbbe140843 100644 --- a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -202,4 +203,4 @@ LayerTestResult ElementwiseTestHelper( outValues, quantScale, quantOffset); -} +} \ No newline at end of file diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp new file mode 100644 index 0000000000..a2c88a62e7 --- /dev/null +++ b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp @@ -0,0 +1,14 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ElementwiseUnaryTestImpl.hpp" + +std::unique_ptr CreateWorkload( + const armnn::IWorkloadFactory& workloadFactory, + const armnn::WorkloadInfo& info, + const armnn::ElementwiseUnaryQueueDescriptor& descriptor) +{ + return workloadFactory.CreateElementwiseUnary(descriptor, info); +} \ No newline at end of file diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp new file mode 100644 index 0000000000..bea4ec205e --- /dev/null +++ b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp @@ -0,0 +1,113 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "LayerTestResult.hpp" + +#include + +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include + +std::unique_ptr CreateWorkload( + const armnn::IWorkloadFactory& workloadFactory, + const armnn::WorkloadInfo& info, + const armnn::ElementwiseUnaryQueueDescriptor& descriptor); + +template > +LayerTestResult ElementwiseUnaryTestHelper( + armnn::IWorkloadFactory & workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager, + armnn::UnaryOperation op, + const unsigned int shape[NumDims], + std::vector values, + float quantScale, + int quantOffset, + const unsigned int outShape[NumDims], + std::vector outValues, + float outQuantScale, + int outQuantOffset) +{ + armnn::TensorInfo inputTensorInfo{NumDims, shape, ArmnnType}; + armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnType}; + + inputTensorInfo.SetQuantizationScale(quantScale); + inputTensorInfo.SetQuantizationOffset(quantOffset); + + outputTensorInfo.SetQuantizationScale(outQuantScale); + outputTensorInfo.SetQuantizationOffset(outQuantOffset); + + auto input = MakeTensor(inputTensorInfo, ConvertToDataType(values, inputTensorInfo)); + + LayerTestResult ret(outputTensorInfo); + + std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ElementwiseUnaryDescriptor desc(op); + armnn::ElementwiseUnaryQueueDescriptor qDesc; + qDesc.m_Parameters = desc; + armnn::WorkloadInfo info; + AddInputToWorkload(qDesc, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get()); + auto workload = CreateWorkload(workloadFactory, info, qDesc); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), input.origin()); + + workload->PostAllocationConfigure(); + ExecuteWorkload(*workload, memoryManager); + + CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get()); + + ret.outputExpected = MakeTensor(outputTensorInfo, ConvertToDataType(outValues, + inputTensorInfo)); + return ret; +} + +template > +LayerTestResult ElementwiseUnaryTestHelper( + armnn::IWorkloadFactory & workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager, + armnn::UnaryOperation op, + const unsigned int shape[NumDims], + std::vector values, + const unsigned int outShape[NumDims], + std::vector outValues, + float quantScale = 1.0f, + int quantOffset = 0) +{ + return ElementwiseUnaryTestHelper( + workloadFactory, + memoryManager, + op, + shape, + values, + quantScale, + quantOffset, + outShape, + outValues, + quantScale, + quantOffset); +} \ No newline at end of file diff --git a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp index db928cf2e0..ca423835dc 100644 --- a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp @@ -4,76 +4,15 @@ // #include "ReshapeTestImpl.hpp" +#include "ElementwiseUnaryTestImpl.hpp" -#include -#include -#include - -#include - -namespace -{ - -template> -LayerTestResult Rsqrt2dTestCommon( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::TensorInfo inputTensorInfo, - const armnn::TensorInfo outputTensorInfo, - const 
std::vector& inputValues, - const std::vector& expectedOutputValues) -{ - boost::ignore_unused(memoryManager); - auto inputTensor = MakeTensor(inputTensorInfo, ConvertToDataType(inputValues,inputTensorInfo)); - - LayerTestResult result(outputTensorInfo); - - result.outputExpected = MakeTensor(outputTensorInfo, - ConvertToDataType(expectedOutputValues,outputTensorInfo)); - - std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); - std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); - - armnn::RsqrtQueueDescriptor descriptor; - - armnn::WorkloadInfo info; - - AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); - AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); - - std::unique_ptr workload = workloadFactory.CreateRsqrt(descriptor, info); - - inputHandle->Allocate(); - outputHandle->Allocate(); - - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]); - - workload->PostAllocationConfigure(); - workload->Execute(); - - CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get()); - - return result; -} - -} // anonymous namespace - template LayerTestResult Rsqrt2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - const armnn::TensorShape inputShape{ 2, 2 }; - const armnn::TensorShape outputShape{ 2, 2 }; - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(0.1f); - inputTensorInfo.SetQuantizationOffset(0); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(0.1f); - outputTensorInfo.SetQuantizationOffset(0); + const unsigned int inputShape[] = { 2, 2 }; std::vector inputValues { @@ -87,9 +26,14 @@ LayerTestResult Rsqrt2dTest( 0.25f, 0.2f }; - return Rsqrt2dTestCommon(workloadFactory, memoryManager, - inputTensorInfo, outputTensorInfo, - inputValues, expectedOutputValues); + return ElementwiseUnaryTestHelper<2, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Rsqrt, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } template @@ -97,17 +41,7 @@ LayerTestResult Rsqrt3dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - boost::ignore_unused(memoryManager); - const armnn::TensorShape inputShape{ 3, 1, 2 }; - const armnn::TensorShape outputShape{ 3, 1, 2 }; - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(0.1f); - inputTensorInfo.SetQuantizationOffset(0); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(0.1f); - outputTensorInfo.SetQuantizationOffset(0); + const unsigned int inputShape[] = { 3, 1, 2 }; std::vector inputValues { @@ -121,35 +55,14 @@ LayerTestResult Rsqrt3dTest( 0.2f, 0.125f, 0.1f }; - auto inputTensor = MakeTensor(inputTensorInfo, ConvertToDataType(inputValues,inputTensorInfo)); - - LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, - ConvertToDataType(expectedOutputValues,outputTensorInfo)); - - std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); - std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); - - armnn::RsqrtQueueDescriptor descriptor; - - armnn::WorkloadInfo info; - - AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); - 
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); - - std::unique_ptr workload = workloadFactory.CreateRsqrt(descriptor, info); - - inputHandle->Allocate(); - outputHandle->Allocate(); - - CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]); - - workload->PostAllocationConfigure(); - workload->Execute(); - - CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get()); - - return result; + return ElementwiseUnaryTestHelper<3, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Rsqrt, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } template @@ -157,14 +70,7 @@ LayerTestResult RsqrtZeroTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - const armnn::TensorShape inputShape{ 1, 2 }; - const armnn::TensorShape outputShape{ 1, 2 }; - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(0.1f); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(0.1f); + const unsigned int inputShape[] = { 1, 2 }; std::vector inputValues { @@ -176,9 +82,14 @@ LayerTestResult RsqrtZeroTest( INFINITY, -INFINITY }; - return Rsqrt2dTestCommon(workloadFactory, memoryManager, - inputTensorInfo, outputTensorInfo, - inputValues, expectedOutputValues); + return ElementwiseUnaryTestHelper<2, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Rsqrt, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } template @@ -186,16 +97,7 @@ LayerTestResult RsqrtNegativeTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - const armnn::TensorShape inputShape{ 1, 2 }; - const armnn::TensorShape outputShape{ 1, 2 }; - - armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - inputTensorInfo.SetQuantizationScale(0.1f); - inputTensorInfo.SetQuantizationOffset(0); - - armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); - outputTensorInfo.SetQuantizationScale(0.1f); - outputTensorInfo.SetQuantizationOffset(0); + const unsigned int inputShape[] = { 1, 2 }; std::vector inputValues { @@ -207,9 +109,14 @@ LayerTestResult RsqrtNegativeTest( -NAN, -NAN }; - return Rsqrt2dTestCommon(workloadFactory, memoryManager, - inputTensorInfo, outputTensorInfo, - inputValues, expectedOutputValues); + return ElementwiseUnaryTestHelper<2, ArmnnType>( + workloadFactory, + memoryManager, + armnn::UnaryOperation::Rsqrt, + inputShape, + inputValues, + inputShape, + expectedOutputValues); } // -- cgit v1.2.1
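Note on using the API introduced by this change: the patch removes the per-operation Abs and Rsqrt code paths and routes everything through a single ElementwiseUnary layer selected by a UnaryOperation value (Abs, Rsqrt, plus the newly stubbed Exp, Neg and Sqrt). The deleted AbsEndToEndTestImpl.hpp built its network with the now-deprecated AddAbsLayer(); the sketch below shows the same network built against the new front-end entry point. The layer, descriptor and enum names are taken from this patch; the header paths and the slot-connection/SetTensorInfo boilerplate are assumptions based on the existing ArmNN public API rather than part of this change.

    // Minimal sketch: building an Abs network through the new ElementwiseUnary
    // layer instead of the deprecated AddAbsLayer(). Header paths and the
    // slot-wiring calls are assumed from the existing ArmNN public API.
    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>
    #include <armnn/Types.hpp>

    armnn::INetworkPtr BuildAbsNetwork(const armnn::TensorInfo& tensorInfo)
    {
        using namespace armnn;

        INetworkPtr network = INetwork::Create();

        // One descriptor now selects the unary operation (Abs, Rsqrt, Exp, Neg, Sqrt).
        ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);

        IConnectableLayer* input  = network->AddInputLayer(0, "input");
        IConnectableLayer* unary  = network->AddElementwiseUnaryLayer(descriptor, "abs");
        IConnectableLayer* output = network->AddOutputLayer(0, "output");

        // Wire input -> abs -> output and propagate the tensor shape/type.
        input->GetOutputSlot(0).Connect(unary->GetInputSlot(0));
        unary->GetOutputSlot(0).Connect(output->GetInputSlot(0));
        input->GetOutputSlot(0).SetTensorInfo(tensorInfo);
        unary->GetOutputSlot(0).SetTensorInfo(tensorInfo);

        return network;
    }

Backwards compatibility is preserved inside the base classes: LayerSupportBase::IsElementwiseUnarySupported() and WorkloadFactoryBase::CreateElementwiseUnary() forward Abs and Rsqrt requests to the deprecated IsAbsSupported/IsRsqrtSupported and CreateAbs/CreateRsqrt entry points, so backends that have not yet implemented the new workload keep functioning until they are migrated.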