about summary refs log tree commit diff
path: root/src/backends/backendsCommon/test
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/backendsCommon/test')
-rw-r--r--src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp65
-rw-r--r--src/backends/backendsCommon/test/CMakeLists.txt4
-rw-r--r--src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp77
-rw-r--r--src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp6
-rw-r--r--src/backends/backendsCommon/test/LayerTests.hpp1
-rw-r--r--src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp164
-rw-r--r--src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp3
-rw-r--r--src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp14
-rw-r--r--src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp113
-rw-r--r--src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp167
10 files changed, 277 insertions, 337 deletions
diff --git a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp
deleted file mode 100644
index 602ccd6a19..0000000000
--- a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp
+++ /dev/null
@@ -1,65 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "CommonTestUtils.hpp"
-
-#include <QuantizeHelper.hpp>
-#include <ResolveType.hpp>
-
-
-namespace
-{
-
-armnn::INetworkPtr CreateAbsNetwork(const armnn::TensorInfo& tensorInfo)
-{
- armnn::INetworkPtr network(armnn::INetwork::Create());
-
- armnn::IConnectableLayer* inputLayer = network->AddInputLayer(0, "input");
- armnn::IConnectableLayer* absLayer = network->AddAbsLayer("abs");
- armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0, "output");
-
- Connect(inputLayer, absLayer, tensorInfo, 0, 0);
- Connect(absLayer, outputLayer, tensorInfo, 0, 0);
-
- return network;
-}
-
-} // anonymous namespace
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-void AbsEndToEnd(const std::vector<armnn::BackendId>& backends)
-{
- using namespace armnn;
-
- const float qScale = IsQuantizedType<T>() ? 0.25f : 1.0f;
- const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;
-
- TensorInfo tensorInfo({ 1, 1, 2, 3 }, ArmnnType, qScale, qOffset);
-
- std::vector<float> inputData =
- {
- -1.f, 2.f, -3.f,
- 4.f, -5.f, 6.f
- };
-
- std::vector<float> expectedOutputData =
- {
- 1.f, 2.f, 3.f,
- 4.f, 5.f, 6.f
- };
-
- // quantize data
- std::vector<T> qInputData = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
- std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
-
- INetworkPtr network = CreateAbsNetwork(tensorInfo);
-
- EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
- { { 0, qInputData } },
- { { 0, qExpectedOutputData } },
- backends);
-}
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 82df782317..4716bd47e4 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -4,7 +4,6 @@
#
list(APPEND armnnBackendsCommonUnitTests_sources
- AbsEndToEndTestImpl.hpp
ActivationFixture.hpp
ArgMinMaxEndToEndTestImpl.hpp
BackendIdTests.cpp
@@ -19,6 +18,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
DetectionPostProcessEndToEndTestImpl.hpp
DynamicBackendTests.cpp
DynamicBackendTests.hpp
+ ElementwiseUnaryEndToEndTestImpl.hpp
EndToEndTestImpl.hpp
GatherEndToEndTestImpl.hpp
InstanceNormalizationEndToEndTestImpl.cpp
@@ -81,6 +81,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
layerTests/DivisionTestImpl.cpp
layerTests/DivisionTestImpl.hpp
layerTests/ElementwiseTestImpl.hpp
+ layerTests/ElementwiseUnaryTestImpl.cpp
+ layerTests/ElementwiseUnaryTestImpl.hpp
layerTests/FakeQuantizationTestImpl.cpp
layerTests/FakeQuantizationTestImpl.hpp
layerTests/FloorTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
new file mode 100644
index 0000000000..4c93735bc8
--- /dev/null
+++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
@@ -0,0 +1,77 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "CommonTestUtils.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/INetwork.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+#include <vector>
+
+namespace
+{
+
+template<armnn::DataType ArmnnTypeInput>
+INetworkPtr CreateElementwiseUnaryNetwork(const TensorShape& inputShape,
+ const TensorShape& outputShape,
+ UnaryOperation operation,
+ const float qScale = 1.0f,
+ const int32_t qOffset = 0)
+{
+ using namespace armnn;
+
+ INetworkPtr net(INetwork::Create());
+
+ ElementwiseUnaryDescriptor descriptor(operation);
+ IConnectableLayer* elementwiseUnaryLayer = net->AddElementwiseUnaryLayer(descriptor, "elementwiseUnary");
+
+ TensorInfo inputTensorInfo(inputShape, ArmnnTypeInput, qScale, qOffset);
+ IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast<LayerBindingId>(0));
+ Connect(input, elementwiseUnaryLayer, inputTensorInfo, 0, 0);
+
+ TensorInfo outputTensorInfo(outputShape, ArmnnTypeInput, qScale, qOffset);
+ IConnectableLayer* output = net->AddOutputLayer(0, "output");
+ Connect(elementwiseUnaryLayer, output, outputTensorInfo, 0, 0);
+
+ return net;
+}
+
+template<armnn::DataType ArmnnInType,
+ typename TInput = armnn::ResolveType<ArmnnInType>>
+void ElementwiseUnarySimpleEndToEnd(const std::vector<BackendId>& backends,
+ UnaryOperation operation,
+ const std::vector<float> expectedOutput)
+{
+ using namespace armnn;
+
+ const float qScale = IsQuantizedType<TInput>() ? 0.25f : 1.0f;
+ const int32_t qOffset = IsQuantizedType<TInput>() ? 50 : 0;
+
+ const TensorShape& inputShape = { 2, 2, 2, 2 };
+ const TensorShape& outputShape = { 2, 2, 2, 2 };
+
+ // Builds up the structure of the network
+ INetworkPtr net = CreateElementwiseUnaryNetwork<ArmnnInType>(inputShape, outputShape, operation, qScale, qOffset);
+
+ BOOST_TEST_CHECKPOINT("create a network");
+
+ const std::vector<float> input({ 1, -1, 1, 1, 5, -5, 5, 5,
+ -3, 3, 3, 3, 4, 4, -4, 4 });
+
+ // quantize data
+ std::vector<TInput> qInputData = armnnUtils::QuantizedVector<TInput>(input, qScale, qOffset);
+ std::vector<TInput> qExpectedOutput = armnnUtils::QuantizedVector<TInput>(expectedOutput, qScale, qOffset);
+
+ std::map<int, std::vector<TInput>> inputTensorData = {{ 0, qInputData }};
+ std::map<int, std::vector<TInput>> expectedOutputData = {{ 0, qExpectedOutput }};
+
+ EndToEndLayerTestImpl<ArmnnInType, ArmnnInType>(move(net), inputTensorData, expectedOutputData, backends);
+}
+
+} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 031210f1fc..e4ce7407bf 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -427,8 +427,6 @@ template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;
// Every entry in the armnn::LayerType enum must be accounted for below.
-DECLARE_LAYER_POLICY_1_PARAM(Abs)
-
DECLARE_LAYER_POLICY_2_PARAM(Activation)
DECLARE_LAYER_POLICY_1_PARAM(Addition)
@@ -465,6 +463,8 @@ DECLARE_LAYER_POLICY_1_PARAM(Dequantize)
DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)
+DECLARE_LAYER_POLICY_2_PARAM(ElementwiseUnary)
+
DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)
DECLARE_LAYER_POLICY_1_PARAM(Floor)
@@ -517,8 +517,6 @@ DECLARE_LAYER_POLICY_2_PARAM(Resize)
DECLARE_LAYER_POLICY_2_PARAM(Reshape)
-DECLARE_LAYER_POLICY_1_PARAM(Rsqrt)
-
DECLARE_LAYER_POLICY_2_PARAM(Slice)
DECLARE_LAYER_POLICY_2_PARAM(Softmax)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 05c307ead2..eba7944cc3 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -22,6 +22,7 @@
#include <backendsCommon/test/layerTests/DequantizeTestImpl.hpp>
#include <backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp>
#include <backendsCommon/test/layerTests/DivisionTestImpl.hpp>
+#include <backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp>
#include <backendsCommon/test/layerTests/FakeQuantizationTestImpl.hpp>
#include <backendsCommon/test/layerTests/FloorTestImpl.hpp>
#include <backendsCommon/test/layerTests/FullyConnectedTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
index cc57893439..7706809e8b 100644
--- a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
@@ -4,85 +4,15 @@
//
#include "AbsTestImpl.hpp"
+#include "ElementwiseUnaryTestImpl.hpp"
-#include <backendsCommon/test/DataTypeUtils.hpp>
-#include <backendsCommon/test/TensorCopyUtils.hpp>
-#include <backendsCommon/test/WorkloadTestUtils.hpp>
-
-#include <test/TensorHelpers.hpp>
-
-namespace
-{
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 2> Abs2dTestCommon(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::TensorInfo inputTensorInfo,
- const armnn::TensorInfo outputTensorInfo,
- const std::vector<float>& inputValues,
- const std::vector<float>& expectedOutputValues)
-{
- boost::ignore_unused(memoryManager);
- auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo));
-
- LayerTestResult<T, 2> result(outputTensorInfo);
-
- result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
- ConvertToDataType<ArmnnType>(expectedOutputValues,outputTensorInfo));
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::AbsQueueDescriptor descriptor;
-
- armnn::WorkloadInfo info;
-
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAbs(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
-
- return result;
-}
-
-} // anonymous namespace
-
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> Abs2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- const armnn::TensorShape inputShape{ 2, 2 };
- const armnn::TensorShape outputShape{ 2, 2 };
-
- float qScale = 0.0625f;
- int32_t qOffset = 64;
-
- if (ArmnnType == armnn::DataType::QSymmS16)
- {
- qScale = 0.1f;
- qOffset = 0;
- }
-
- armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
- inputTensorInfo.SetQuantizationScale(qScale);
- inputTensorInfo.SetQuantizationOffset(qOffset);
-
- armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
- outputTensorInfo.SetQuantizationScale(qScale);
- outputTensorInfo.SetQuantizationOffset(qOffset);
+ const unsigned int inputShape[] = { 2, 2 };
std::vector<float> inputValues
{
@@ -98,9 +28,14 @@ LayerTestResult<T, 2> Abs2dTest(
std::vector<float> expectedOutputValues(inputValues.size());
std::transform(inputValues.begin(), inputValues.end(), expectedOutputValues.begin(), f);
- return Abs2dTestCommon<ArmnnType>(workloadFactory, memoryManager,
- inputTensorInfo, outputTensorInfo,
- inputValues, expectedOutputValues);
+ return ElementwiseUnaryTestHelper<2, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ armnn::UnaryOperation::Abs,
+ inputShape,
+ inputValues,
+ inputShape,
+ expectedOutputValues);
}
template<armnn::DataType ArmnnType, typename T>
@@ -108,27 +43,7 @@ LayerTestResult<T, 3> Abs3dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
-
- const armnn::TensorShape inputShape{ 3, 1, 2 };
- const armnn::TensorShape outputShape{ 3, 1, 2 };
-
- float qScale = 0.0625f;
- int32_t qOffset = 64;
-
- if (ArmnnType == armnn::DataType::QSymmS16)
- {
- qScale = 0.1f;
- qOffset = 0;
- }
-
- armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
- inputTensorInfo.SetQuantizationScale(qScale);
- inputTensorInfo.SetQuantizationOffset(qOffset);
-
- armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
- outputTensorInfo.SetQuantizationScale(qScale);
- outputTensorInfo.SetQuantizationOffset(qOffset);
+ const unsigned int inputShape[] = { 3, 1, 2 };
std::vector<float> inputValues
{
@@ -143,35 +58,14 @@ LayerTestResult<T, 3> Abs3dTest(
std::vector<float>expectedOutputValues(inputValues.size());
std::transform(inputValues.begin(), inputValues.end(), expectedOutputValues.begin(), f);
- auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo));
-
- LayerTestResult<T, 3> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 3>(outputTensorInfo,
- ConvertToDataType<ArmnnType>(expectedOutputValues,outputTensorInfo));
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::AbsQueueDescriptor descriptor;
-
- armnn::WorkloadInfo info;
-
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAbs(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
-
- return result;
+ return ElementwiseUnaryTestHelper<3, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ armnn::UnaryOperation::Abs,
+ inputShape,
+ inputValues,
+ inputShape,
+ expectedOutputValues);
}
template<armnn::DataType ArmnnType, typename T>
@@ -179,14 +73,7 @@ LayerTestResult<T, 2> AbsZeroTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- const armnn::TensorShape inputShape{ 1, 2 };
- const armnn::TensorShape outputShape{ 1, 2 };
-
- armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
- inputTensorInfo.SetQuantizationScale(0.1f);
-
- armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
- outputTensorInfo.SetQuantizationScale(0.1f);
+ const unsigned int inputShape[] = { 1, 2 };
std::vector<float> inputValues
{
@@ -198,9 +85,14 @@ LayerTestResult<T, 2> AbsZeroTest(
0.f, 0.f
};
- return Abs2dTestCommon<ArmnnType>(workloadFactory, memoryManager,
- inputTensorInfo, outputTensorInfo,
- inputValues, expectedOutputValues);
+ return ElementwiseUnaryTestHelper<2, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ armnn::UnaryOperation::Abs,
+ inputShape,
+ inputValues,
+ inputShape,
+ expectedOutputValues);
}
//
diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp
index c0a779c0e6..cbbe140843 100644
--- a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp
@@ -15,6 +15,7 @@
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
+#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
@@ -202,4 +203,4 @@ LayerTestResult<T, NumDims> ElementwiseTestHelper(
outValues,
quantScale,
quantOffset);
-}
+}
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp
new file mode 100644
index 0000000000..a2c88a62e7
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp
@@ -0,0 +1,14 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ElementwiseUnaryTestImpl.hpp"
+
+std::unique_ptr<armnn::IWorkload> CreateWorkload(
+ const armnn::IWorkloadFactory& workloadFactory,
+ const armnn::WorkloadInfo& info,
+ const armnn::ElementwiseUnaryQueueDescriptor& descriptor)
+{
+ return workloadFactory.CreateElementwiseUnary(descriptor, info);
+}
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp
new file mode 100644
index 0000000000..bea4ec205e
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.hpp
@@ -0,0 +1,113 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+#include <ResolveType.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <backendsCommon/test/DataTypeUtils.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+#include <memory>
+
+std::unique_ptr<armnn::IWorkload> CreateWorkload(
+ const armnn::IWorkloadFactory& workloadFactory,
+ const armnn::WorkloadInfo& info,
+ const armnn::ElementwiseUnaryQueueDescriptor& descriptor);
+
+template <std::size_t NumDims,
+ armnn::DataType ArmnnType,
+ typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, NumDims> ElementwiseUnaryTestHelper(
+ armnn::IWorkloadFactory & workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
+ armnn::UnaryOperation op,
+ const unsigned int shape[NumDims],
+ std::vector<float> values,
+ float quantScale,
+ int quantOffset,
+ const unsigned int outShape[NumDims],
+ std::vector<float> outValues,
+ float outQuantScale,
+ int outQuantOffset)
+{
+ armnn::TensorInfo inputTensorInfo{NumDims, shape, ArmnnType};
+ armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnType};
+
+ inputTensorInfo.SetQuantizationScale(quantScale);
+ inputTensorInfo.SetQuantizationOffset(quantOffset);
+
+ outputTensorInfo.SetQuantizationScale(outQuantScale);
+ outputTensorInfo.SetQuantizationOffset(outQuantOffset);
+
+ auto input = MakeTensor<T, NumDims>(inputTensorInfo, ConvertToDataType<ArmnnType>(values, inputTensorInfo));
+
+ LayerTestResult<T, NumDims> ret(outputTensorInfo);
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::ElementwiseUnaryDescriptor desc(op);
+ armnn::ElementwiseUnaryQueueDescriptor qDesc;
+ qDesc.m_Parameters = desc;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(qDesc, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get());
+ auto workload = CreateWorkload(workloadFactory, info, qDesc);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), input.origin());
+
+ workload->PostAllocationConfigure();
+ ExecuteWorkload(*workload, memoryManager);
+
+ CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+
+ ret.outputExpected = MakeTensor<T, NumDims>(outputTensorInfo, ConvertToDataType<ArmnnType>(outValues,
+ inputTensorInfo));
+ return ret;
+}
+
+template <std::size_t NumDims,
+ armnn::DataType ArmnnType,
+ typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, NumDims> ElementwiseUnaryTestHelper(
+ armnn::IWorkloadFactory & workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
+ armnn::UnaryOperation op,
+ const unsigned int shape[NumDims],
+ std::vector<float> values,
+ const unsigned int outShape[NumDims],
+ std::vector<float> outValues,
+ float quantScale = 1.0f,
+ int quantOffset = 0)
+{
+ return ElementwiseUnaryTestHelper<NumDims, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ op,
+ shape,
+ values,
+ quantScale,
+ quantOffset,
+ outShape,
+ outValues,
+ quantScale,
+ quantOffset);
+}
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
index db928cf2e0..ca423835dc 100644
--- a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
@@ -4,76 +4,15 @@
//
#include "ReshapeTestImpl.hpp"
+#include "ElementwiseUnaryTestImpl.hpp"
-#include <backendsCommon/test/DataTypeUtils.hpp>
-#include <backendsCommon/test/TensorCopyUtils.hpp>
-#include <backendsCommon/test/WorkloadTestUtils.hpp>
-
-#include <test/TensorHelpers.hpp>
-
-namespace
-{
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 2> Rsqrt2dTestCommon(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::TensorInfo inputTensorInfo,
- const armnn::TensorInfo outputTensorInfo,
- const std::vector<float>& inputValues,
- const std::vector<float>& expectedOutputValues)
-{
- boost::ignore_unused(memoryManager);
- auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo));
-
- LayerTestResult<T, 2> result(outputTensorInfo);
-
- result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
- ConvertToDataType<ArmnnType>(expectedOutputValues,outputTensorInfo));
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::RsqrtQueueDescriptor descriptor;
-
- armnn::WorkloadInfo info;
-
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
-
- return result;
-}
-
-} // anonymous namespace
-
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> Rsqrt2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- const armnn::TensorShape inputShape{ 2, 2 };
- const armnn::TensorShape outputShape{ 2, 2 };
-
- armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
- inputTensorInfo.SetQuantizationScale(0.1f);
- inputTensorInfo.SetQuantizationOffset(0);
-
- armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
- outputTensorInfo.SetQuantizationScale(0.1f);
- outputTensorInfo.SetQuantizationOffset(0);
+ const unsigned int inputShape[] = { 2, 2 };
std::vector<float> inputValues
{
@@ -87,9 +26,14 @@ LayerTestResult<T, 2> Rsqrt2dTest(
0.25f, 0.2f
};
- return Rsqrt2dTestCommon<ArmnnType>(workloadFactory, memoryManager,
- inputTensorInfo, outputTensorInfo,
- inputValues, expectedOutputValues);
+ return ElementwiseUnaryTestHelper<2, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ armnn::UnaryOperation::Rsqrt,
+ inputShape,
+ inputValues,
+ inputShape,
+ expectedOutputValues);
}
template<armnn::DataType ArmnnType, typename T>
@@ -97,17 +41,7 @@ LayerTestResult<T, 3> Rsqrt3dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
- const armnn::TensorShape inputShape{ 3, 1, 2 };
- const armnn::TensorShape outputShape{ 3, 1, 2 };
-
- armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
- inputTensorInfo.SetQuantizationScale(0.1f);
- inputTensorInfo.SetQuantizationOffset(0);
-
- armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
- outputTensorInfo.SetQuantizationScale(0.1f);
- outputTensorInfo.SetQuantizationOffset(0);
+ const unsigned int inputShape[] = { 3, 1, 2 };
std::vector<float> inputValues
{
@@ -121,35 +55,14 @@ LayerTestResult<T, 3> Rsqrt3dTest(
0.2f, 0.125f, 0.1f
};
- auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo));
-
- LayerTestResult<T, 3> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 3>(outputTensorInfo,
- ConvertToDataType<ArmnnType>(expectedOutputValues,outputTensorInfo));
-
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
- armnn::RsqrtQueueDescriptor descriptor;
-
- armnn::WorkloadInfo info;
-
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);
-
- inputHandle->Allocate();
- outputHandle->Allocate();
-
- CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
-
- workload->PostAllocationConfigure();
- workload->Execute();
-
- CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
-
- return result;
+ return ElementwiseUnaryTestHelper<3, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ armnn::UnaryOperation::Rsqrt,
+ inputShape,
+ inputValues,
+ inputShape,
+ expectedOutputValues);
}
template<armnn::DataType ArmnnType, typename T>
@@ -157,14 +70,7 @@ LayerTestResult<T, 2> RsqrtZeroTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- const armnn::TensorShape inputShape{ 1, 2 };
- const armnn::TensorShape outputShape{ 1, 2 };
-
- armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
- inputTensorInfo.SetQuantizationScale(0.1f);
-
- armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
- outputTensorInfo.SetQuantizationScale(0.1f);
+ const unsigned int inputShape[] = { 1, 2 };
std::vector<float> inputValues
{
@@ -176,9 +82,14 @@ LayerTestResult<T, 2> RsqrtZeroTest(
INFINITY, -INFINITY
};
- return Rsqrt2dTestCommon<ArmnnType>(workloadFactory, memoryManager,
- inputTensorInfo, outputTensorInfo,
- inputValues, expectedOutputValues);
+ return ElementwiseUnaryTestHelper<2, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ armnn::UnaryOperation::Rsqrt,
+ inputShape,
+ inputValues,
+ inputShape,
+ expectedOutputValues);
}
template<armnn::DataType ArmnnType, typename T>
@@ -186,16 +97,7 @@ LayerTestResult<T, 2> RsqrtNegativeTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- const armnn::TensorShape inputShape{ 1, 2 };
- const armnn::TensorShape outputShape{ 1, 2 };
-
- armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
- inputTensorInfo.SetQuantizationScale(0.1f);
- inputTensorInfo.SetQuantizationOffset(0);
-
- armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
- outputTensorInfo.SetQuantizationScale(0.1f);
- outputTensorInfo.SetQuantizationOffset(0);
+ const unsigned int inputShape[] = { 1, 2 };
std::vector<float> inputValues
{
@@ -207,9 +109,14 @@ LayerTestResult<T, 2> RsqrtNegativeTest(
-NAN, -NAN
};
- return Rsqrt2dTestCommon<ArmnnType>(workloadFactory, memoryManager,
- inputTensorInfo, outputTensorInfo,
- inputValues, expectedOutputValues);
+ return ElementwiseUnaryTestHelper<2, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ armnn::UnaryOperation::Rsqrt,
+ inputShape,
+ inputValues,
+ inputShape,
+ expectedOutputValues);
}
//