author     Tianle Cheng <tianle.cheng@arm.com>    2023-12-12 13:52:22 +0000
committer  Tianle Cheng <tianle.cheng@arm.com>    2023-12-15 10:22:31 +0000
commit     7790dc6531034778d92ba264fd61174bcff7051e (patch)
tree       fe24f0a8a1229296168db559501a7403d5cbf962
parent     f0d8ec1f4be3ce5794a48c3d3449fe93f2e58d04 (diff)
download   armnn-7790dc6531034778d92ba264fd61174bcff7051e.tar.gz
IVGCVSW-8113 Update EndToEnd tests and TosaReference support for Maximum
* Added ElementwiseBinary EndToEnd tests with inputs of the same shape to avoid Reshape
* Added Slice EndToEnd tests with 4D tensors
* Added TosaReference support for Maximum and TosaRefEndToEnd tests

Signed-off-by: Tianle Cheng <tianle.cheng@arm.com>
Change-Id: I4fa24435a75559e00b110d0e542b4f2bf07b21b4
-rw-r--r--  src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp  | 66
-rw-r--r--  src/backends/backendsCommon/test/SliceEndToEndTestImpl.hpp              | 35
-rw-r--r--  src/backends/tosaCommon/TosaMappings.cpp                                 |  5
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp  | 24
-rw-r--r--  src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.hpp  |  4
-rw-r--r--  src/backends/tosaReference/TosaRefLayerSupport.cpp                      |  1
-rw-r--r--  src/backends/tosaReference/test/TosaRefEndToEndTests.cpp                | 10
7 files changed, 140 insertions, 5 deletions
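
For orientation, the two new test helpers added below are ordinary function templates that a backend's end-to-end suite calls directly. The following is a minimal sketch of how such a suite might invoke them; the TEST_CASE names and the "TosaRef" backend id string are illustrative, and only the Maximum case with DataType::Signed32 actually appears in the TosaRefEndToEndTests.cpp change further down.

// Hypothetical excerpt from a backend end-to-end test file (doctest-style,
// matching the existing ArmNN end-to-end suites).
#include <doctest/doctest.h>

#include "backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp"
#include "backendsCommon/test/SliceEndToEndTestImpl.hpp"

// Backend under test; the TosaRef suite already defines an equivalent list.
static const std::vector<armnn::BackendId> tosaDefaultBackends = { "TosaRef" };

// Both inputs share the shape { 2, 2, 2, 2 }, so no Reshape layer is inserted.
TEST_CASE("MaximumSameShapeEndToEnd")
{
    ElementwiseBinarySimpleNoReshapeEndToEnd<armnn::DataType::Signed32>(
        tosaDefaultBackends, armnn::BinaryOperation::Maximum);
}

// 4D Slice: begin { 1, 0, 0, 0 }, size { 1, 3, 2, 1 } on a { 2, 3, 2, 3 } input.
TEST_CASE("Slice4DimEndToEnd")
{
    SliceEndToEnd4Dim<armnn::DataType::Float32>(tosaDefaultBackends);
}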
diff --git a/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
index 0d47fd6056..dfc7bfc18e 100644
--- a/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
@@ -110,4 +110,70 @@ void ElementwiseBinarySimpleEndToEnd(const std::vector<BackendId>& backends,
    EndToEndLayerTestImpl<ArmnnInType, ArmnnInType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
+template<armnn::DataType ArmnnInType,
+         typename TInput = armnn::ResolveType<ArmnnInType>>
+void ElementwiseBinarySimpleNoReshapeEndToEnd(const std::vector<BackendId>& backends,
+                                              BinaryOperation operation)
+{
+    using namespace armnn;
+
+    const float qScale = IsQuantizedType<TInput>() ? 0.25f : 1.0f;
+    const int32_t qOffset = IsQuantizedType<TInput>() ? 50 : 0;
+
+    const TensorShape& input1Shape = { 2, 2, 2, 2 };
+    const TensorShape& input2Shape = { 2, 2, 2, 2 };
+    const TensorShape& outputShape = { 2, 2, 2, 2 };
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateElementwiseBinaryNetwork<ArmnnInType>(input1Shape, input2Shape, outputShape,
+                                                                  operation, qScale, qOffset);
+
+    CHECK(net);
+
+    const std::vector<float> input1({ 1, -1, 1, 1, 5, -5, 5, 5, -3, 3, 3, 3, 4, 4, -4, 4 });
+
+    const std::vector<float> input2({ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 });
+
+    std::vector<float> expectedOutput;
+    switch (operation) {
+        case armnn::BinaryOperation::Add:
+            expectedOutput = { 3, 1, 3, 3, 7, -3, 7, 7, -1, 5, 5, 5, 6, 6, -2, 6 };
+            break;
+        case armnn::BinaryOperation::Div:
+            expectedOutput = {0.5f, -0.5f, 0.5f, 0.5f, 2.5f, -2.5f, 2.5f, 2.5f, -1.5f, 1.5f, 1.5f, 1.5f, 2, 2, -2, 2};
+            break;
+        case armnn::BinaryOperation::Maximum:
+            expectedOutput = { 2, 2, 2, 2, 5, 2, 5, 5, 2, 3, 3, 3, 4, 4, 2, 4 };
+            break;
+        case armnn::BinaryOperation::Minimum:
+            expectedOutput = { 1, -1, 1, 1, 2, -5, 2, 2, -3, 2, 2, 2, 2, 2, -4, 2 };
+            break;
+        case armnn::BinaryOperation::Mul:
+            expectedOutput = { 2, -2, 2, 2, 10, -10, 10, 10, -6, 6, 6, 6, 8, 8, -8, 8 };
+            break;
+        case armnn::BinaryOperation::Sub:
+            expectedOutput = { -1, -3, -1, -1, 3, -7, 3, 3, -5, 1, 1, 1, 2, 2, -6, 2 };
+            break;
+        case armnn::BinaryOperation::SqDiff:
+            expectedOutput = { 1, 9, 1, 1, 9, 49, 9, 9, 25, 1, 1, 1, 4, 4, 36, 4 };
+            break;
+        case armnn::BinaryOperation::Power:
+            expectedOutput = { 1, 1, 1, 1, 25, 25, 25, 25, 9, 9, 9, 9, 16, 16, 16, 16 };
+            break;
+        default:
+            throw("Invalid Elementwise Binary operation");
+    }
+
+    const std::vector<float> expectedOutput_const = expectedOutput;
+    // quantize data
+    std::vector<TInput> qInput1Data = armnnUtils::QuantizedVector<TInput>(input1, qScale, qOffset);
+    std::vector<TInput> qInput2Data = armnnUtils::QuantizedVector<TInput>(input2, qScale, qOffset);
+    std::vector<TInput> qExpectedOutput = armnnUtils::QuantizedVector<TInput>(expectedOutput_const, qScale, qOffset);
+
+    std::map<int, std::vector<TInput>> inputTensorData = {{ 0, qInput1Data }, { 1, qInput2Data }};
+    std::map<int, std::vector<TInput>> expectedOutputData = {{ 0, qExpectedOutput }};
+
+    EndToEndLayerTestImpl<ArmnnInType, ArmnnInType>(std::move(net), inputTensorData, expectedOutputData, backends);
+}
+
} // anonymous namespace
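
As a quick sanity check on the expected outputs above: with input2 fixed at 2 everywhere, each row is simply the chosen operation applied elementwise to input1 and 2. The Maximum row, for example, can be reproduced with a few lines of standalone C++ (illustrative only, not part of the patch):

// Standalone check that the Maximum row of the expected outputs matches
// an elementwise max over the two test inputs.
#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    const std::vector<float> input1 { 1, -1, 1, 1, 5, -5, 5, 5, -3, 3, 3, 3, 4, 4, -4, 4 };
    const std::vector<float> input2(16, 2.0f);
    const std::vector<float> expected { 2, 2, 2, 2, 5, 2, 5, 5, 2, 3, 3, 3, 4, 4, 2, 4 };

    std::vector<float> actual(input1.size());
    std::transform(input1.begin(), input1.end(), input2.begin(), actual.begin(),
                   [](float a, float b) { return std::max(a, b); });

    assert(actual == expected);
    return 0;
}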
diff --git a/src/backends/backendsCommon/test/SliceEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/SliceEndToEndTestImpl.hpp
index 811ce27b79..43bc668753 100644
--- a/src/backends/backendsCommon/test/SliceEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/SliceEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -96,4 +96,37 @@ void SliceEndToEndFloat16(const std::vector<armnn::BackendId>& backends)
    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends);
}
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void SliceEndToEnd4Dim(const std::vector<armnn::BackendId>& backends)
+{
+    using namespace armnn;
+
+    const TensorShape& inputShape = { 2, 3, 2, 3 };
+    const TensorShape& outputShape = { 1, 3, 2, 1 };
+
+    SliceDescriptor descriptor;
+    descriptor.m_Begin = { 1, 0, 0, 0 };
+    descriptor.m_Size = { 1, 3, 2, 1 };
+
+    INetworkPtr network = CreateSliceNetwork<ArmnnType>(inputShape, outputShape, descriptor);
+
+    CHECK(network);
+
+    std::vector<T> inputData{ 1, 1, 1, 2, 2, 2,
+                              3, 3, 3, 4, 4, 4,
+                              5, 5, 5, 6, 6, 6,
+                              1, 1, 1, 2, 2, 2,
+                              3, 3, 3, 4, 4, 4,
+                              5, 5, 5, 6, 6, 6 };
+
+    std::vector<T> expectedOutput{ 1, 2,
+                                   3, 4,
+                                   5, 6 };
+
+    std::map<int, std::vector<T>> inputTensorData = { { 0, inputData } };
+    std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } };
+
+    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensorData, expectedOutputData, backends);
+}
+
} // anonymous namespace
\ No newline at end of file
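
The expected output { 1, 2, 3, 4, 5, 6 } can be checked by hand: begin { 1, 0, 0, 0 } with size { 1, 3, 2, 1 } selects the second batch of the { 2, 3, 2, 3 } input and, within it, the first element along the innermost dimension for every (channel, height) pair. A standalone sketch of that row-major index arithmetic (illustrative only, not part of the patch):

// Standalone check that slicing the { 2, 3, 2, 3 } input with
// begin { 1, 0, 0, 0 } and size { 1, 3, 2, 1 } yields { 1, 2, 3, 4, 5, 6 }.
#include <cassert>
#include <vector>

int main()
{
    const std::vector<int> input {
        1, 1, 1, 2, 2, 2,  3, 3, 3, 4, 4, 4,  5, 5, 5, 6, 6, 6,   // batch 0
        1, 1, 1, 2, 2, 2,  3, 3, 3, 4, 4, 4,  5, 5, 5, 6, 6, 6 }; // batch 1
    const int dims[4]  = { 2, 3, 2, 3 };
    const int begin[4] = { 1, 0, 0, 0 };
    const int size[4]  = { 1, 3, 2, 1 };

    std::vector<int> output;
    for (int n = 0; n < size[0]; ++n)
        for (int c = 0; c < size[1]; ++c)
            for (int h = 0; h < size[2]; ++h)
                for (int w = 0; w < size[3]; ++w)
                {
                    // Row-major flat index into the 4D input tensor.
                    const int idx = (((begin[0] + n) * dims[1] + (begin[1] + c)) * dims[2]
                                     + (begin[2] + h)) * dims[3] + (begin[3] + w);
                    output.push_back(input[idx]);
                }

    assert((output == std::vector<int>{ 1, 2, 3, 4, 5, 6 }));
    return 0;
}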
diff --git a/src/backends/tosaCommon/TosaMappings.cpp b/src/backends/tosaCommon/TosaMappings.cpp
index 0bdb1fec3d..dff266d793 100644
--- a/src/backends/tosaCommon/TosaMappings.cpp
+++ b/src/backends/tosaCommon/TosaMappings.cpp
@@ -29,6 +29,11 @@ TosaSerializationBasicBlock* GetTosaMapping(const Layer* layer,
        {
            return ConvertElementwiseBinaryToTosaOperator(layer, type, inputs, outputs);
        }
+        case LayerType::ElementwiseBinary:
+        {
+            auto binaryDesc = PolymorphicDowncast<const ElementwiseBinaryDescriptor*>(&descriptor);
+            return ConvertElementwiseBinaryToTosaOperator(layer, type, inputs, outputs, binaryDesc);
+        }
        case LayerType::ElementwiseUnary:
        {
            auto unaryDesc = PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor);
diff --git a/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
index 28e7ec8231..6c2b31437b 100644
--- a/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -8,7 +8,8 @@
TosaSerializationBasicBlock* ConvertElementwiseBinaryToTosaOperator(const Layer* layer,
                                                                    const LayerType type,
                                                                    const std::vector<const TensorInfo*>& inputs,
-                                                                    const std::vector<const TensorInfo*>& outputs)
+                                                                    const std::vector<const TensorInfo*>& outputs,
+                                                                    const ElementwiseBinaryDescriptor* descriptor)
{
    std::string input0Name = std::string("input0_");
    std::string input1Name = std::string("input1_");
@@ -43,6 +44,25 @@ TosaSerializationBasicBlock* ConvertElementwiseBinaryToTosaOperator(const Layer*
blockName = std::string("Op_ADD_block_") + GetUniqueTosaMappingID();
break;
}
+ case LayerType::ElementwiseBinary:
+ {
+ switch (descriptor->m_Operation)
+ {
+ case armnn::BinaryOperation::Maximum:
+ {
+ op = new TosaSerializationOperator(Op_MAXIMUM,
+ Attribute_NONE,
+ nullptr,
+ {input0Name, input1Name},
+ {outputName});
+ blockName = std::string("Op_MAXIMUM_block_") + GetUniqueTosaMappingID();
+ break;
+ }
+ default:
+ throw armnn::Exception("ConvertElementwiseBinaryToTosaOperator: Unsupported layer type.");
+ }
+ break;
+ }
case LayerType::Multiplication:
{
int32_t shift = 0;
diff --git a/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.hpp b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.hpp
index 4966ed1659..05a15d235a 100644
--- a/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.hpp
+++ b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.hpp
@@ -13,4 +13,6 @@ using namespace tosa;
TosaSerializationBasicBlock* ConvertElementwiseBinaryToTosaOperator(const Layer* layer,
                                                                    const LayerType type,
                                                                    const std::vector<const TensorInfo*>& inputs,
-                                                                    const std::vector<const TensorInfo*>& outputs);
\ No newline at end of file
+                                                                    const std::vector<const TensorInfo*>& outputs,
+                                                                    const ElementwiseBinaryDescriptor*
+                                                                        descriptor = nullptr);
\ No newline at end of file
diff --git a/src/backends/tosaReference/TosaRefLayerSupport.cpp b/src/backends/tosaReference/TosaRefLayerSupport.cpp
index ec6fc3b917..60d0f7c6d6 100644
--- a/src/backends/tosaReference/TosaRefLayerSupport.cpp
+++ b/src/backends/tosaReference/TosaRefLayerSupport.cpp
@@ -40,6 +40,7 @@ bool TosaRefLayerSupport::IsLayerSupported(const LayerType& type,
        case LayerType::Addition:
        case LayerType::Multiplication:
        case LayerType::Subtraction:
+        case LayerType::ElementwiseBinary:
            // Setup inputs and outputs
            inputInfos.push_back(&infos[0]);
            inputInfos.push_back(&infos[1]);
diff --git a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
index ae90c66f39..05d4114382 100644
--- a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
+++ b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp
@@ -8,11 +8,12 @@
#include "backendsCommon/test/AdditionEndToEndTestImpl.hpp"
#include "backendsCommon/test/Convolution2dEndToEndTestImpl.hpp"
#include "backendsCommon/test/ConcatEndToEndTestImpl.hpp"
+#include "backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp"
+#include "backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp"
#include "backendsCommon/test/MultiplicationEndToEndTestImpl.hpp"
#include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
#include "backendsCommon/test/ReshapeEndToEndTestImpl.hpp"
#include "backendsCommon/test/ResizeEndToEndTestImpl.hpp"
-#include "backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp"
#include "backendsCommon/test/SliceEndToEndTestImpl.hpp"
#include "backendsCommon/test/SplitterEndToEndTestImpl.hpp"
#include "backendsCommon/test/SubtractionEndToEndTestImpl.hpp"
@@ -109,6 +110,13 @@ TEST_CASE("TosaRefAvgPool2DIgnoreValueEndtoEndTestFloat32")
    AvgPool2dEndToEnd<DataType::Float32>(tosaDefaultBackends, PaddingMethod::IgnoreValue);
}
+// Maximum
+TEST_CASE("TosaRefMaximumEndtoEndTestInt8")
+{
+    ElementwiseBinarySimpleNoReshapeEndToEnd<DataType::Signed32>(tosaDefaultBackends,
+                                                                 armnn::BinaryOperation::Maximum);
+}
+
// Max Pool 2D
TEST_CASE("TosaRefMaxPool2DEndtoEndTestFloat32")
{