From 9a33946fd0d5e14be6f957b5a985438fa69684d6 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Mon, 5 Dec 2022 11:24:35 +0000
Subject: IVGCVSW-7172 Add ElementwiseBinary (Subtraction & Multiplication) support to TOSA Reference Backend

* Removed AdditionOperator and moved to new ElementwiseBinaryOperator.

Signed-off-by: Nikhil Raj
Signed-off-by: Matthew Sloyan
Change-Id: I8ce20f7575d68334aadcd176827bca3db53d0052
---
 src/backends/backendsCommon/test/CMakeLists.txt    |   4 +-
 .../test/MultiplicationEndToEndTestImpl.hpp        |  96 +++++++++++++++++++
 .../test/SubtractionEndToEndTestImpl.hpp           |  96 +++++++++++++++++++
 src/backends/tosaCommon/TosaMappings.cpp           |   4 +-
 .../operatorMappings/AdditionOperator.cpp          |  72 --------------
 .../operatorMappings/AdditionOperator.hpp          |  20 ----
 .../tosaCommon/operatorMappings/CMakeLists.txt     |   4 +-
 .../operatorMappings/ElementwiseBinaryOperator.cpp | 103 +++++++++++++++++++++
 .../operatorMappings/ElementwiseBinaryOperator.hpp |  20 ++++
 .../operatorMappings/TosaCommonOperators.hpp       |   6 +-
 .../tosaCommon/test/OneToOneMappingTests.cpp       |  48 ++++++++++
 src/backends/tosaReference/TosaRefLayerSupport.cpp |   2 +
 .../tosaReference/test/TosaRefEndToEndTests.cpp    |  31 +++++++
 .../test/TosaRefLayerSupportTests.cpp              |  88 ++++++++++++++++++
 14 files changed, 495 insertions(+), 99 deletions(-)
 create mode 100644 src/backends/backendsCommon/test/MultiplicationEndToEndTestImpl.hpp
 create mode 100644 src/backends/backendsCommon/test/SubtractionEndToEndTestImpl.hpp
 delete mode 100644 src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp
 delete mode 100644 src/backends/tosaCommon/operatorMappings/AdditionOperator.hpp
 create mode 100644 src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
 create mode 100644 src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.hpp
(limited to 'src')

diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 5fcc8b592e..d251bd2597 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -1,5 +1,5 @@
 #
-# Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2017-2022 Arm Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #

@@ -41,6 +41,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     LogSoftmaxEndToEndTestImpl.hpp
     MemoryManagerTests.cpp
     MockBackendId.hpp
+    MultiplicationEndToEndTestImpl.hpp
     OptimizeSubgraphViewTests.cpp
     OptimizationViewsTests.cpp
     PreluEndToEndTestImpl.hpp
@@ -57,6 +58,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     SpaceToDepthEndToEndTestImpl.hpp
     SplitterEndToEndTestImpl.hpp
     StridedSliceAsyncEndToEndTest.hpp
+    SubtractionEndToEndTestImpl.hpp
     TransposeEndToEndTestImpl.hpp
     TensorCopyUtils.hpp
     WorkloadFactoryHelper.hpp
diff --git a/src/backends/backendsCommon/test/MultiplicationEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/MultiplicationEndToEndTestImpl.hpp
new file mode 100644
index 0000000000..40442e2d47
--- /dev/null
+++ b/src/backends/backendsCommon/test/MultiplicationEndToEndTestImpl.hpp
@@ -0,0 +1,96 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT +// +#pragma once + +#include + +#include +#include + +#include + +namespace +{ + +template +armnn::INetworkPtr CreateMultiplicationNetwork(const armnn::TensorShape& inputXShape, + const armnn::TensorShape& inputYShape, + const armnn::TensorShape& outputShape, + const float qScale = 1.0f, + const int32_t qOffset = 0) +{ + using namespace armnn; + + INetworkPtr network(INetwork::Create()); + + TensorInfo inputXTensorInfo(inputXShape, DataType, qScale, qOffset, true); + TensorInfo inputYTensorInfo(inputYShape, DataType, qScale, qOffset, true); + + TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset); + + + IConnectableLayer* multiplication = network->AddMultiplicationLayer("multiplication"); + IConnectableLayer* inputX = network->AddInputLayer(0, "inputX"); + IConnectableLayer* inputY = network->AddInputLayer(1, "inputY"); + IConnectableLayer* output = network->AddOutputLayer(0, "output"); + + Connect(inputX, multiplication, inputXTensorInfo, 0, 0); + Connect(inputY, multiplication, inputYTensorInfo, 0, 1); + Connect(multiplication, output, outputTensorInfo, 0, 0); + + return network; +} + +template> +void MultiplicationEndToEnd(const std::vector& backends) +{ + using namespace armnn; + + const TensorShape& inputXShape = { 2, 2 }; + const TensorShape& inputYShape = { 2, 2 }; + const TensorShape& outputShape = { 2, 2 }; + + INetworkPtr network = CreateMultiplicationNetwork(inputXShape, inputYShape, outputShape); + + CHECK(network); + + std::vector inputXData{ 1, 2, 3, 4 }; + std::vector inputYData{ 5, 2, 6, 3 }; + std::vector expectedOutput{ 5, 4, 18, 12 }; + + std::map> inputTensorData = {{ 0, inputXData }, {1, inputYData}}; + std::map> expectedOutputData = { { 0, expectedOutput } }; + + EndToEndLayerTestImpl(std::move(network), inputTensorData, expectedOutputData, backends); +} + +template +void MultiplicationEndToEndFloat16(const std::vector& backends) +{ + using namespace armnn; + using namespace half_float::literal; + using Half = half_float::half; + + const TensorShape& inputXShape = { 2, 2 }; + const TensorShape& inputYShape = { 2, 2 }; + const TensorShape& outputShape = { 2, 2 }; + + INetworkPtr network = CreateMultiplicationNetwork(inputXShape, inputYShape, outputShape); + CHECK(network); + + std::vector inputXData{ 1._h, 2._h, + 3._h, 4._h }; + std::vector inputYData{ 1._h, 2._h, + 3._h, 4._h }; + std::vector expectedOutput{ 1._h, 4._h, + 9._h, 16._h }; + + std::map> inputTensorData = {{ 0, inputXData }, { 1, inputYData }}; + std::map> expectedOutputData = { { 0, expectedOutput } }; + + EndToEndLayerTestImpl(std::move(network), inputTensorData, expectedOutputData, backends); +} + +} // anonymous namespace diff --git a/src/backends/backendsCommon/test/SubtractionEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/SubtractionEndToEndTestImpl.hpp new file mode 100644 index 0000000000..747fe26df0 --- /dev/null +++ b/src/backends/backendsCommon/test/SubtractionEndToEndTestImpl.hpp @@ -0,0 +1,96 @@ +// +// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// +#pragma once + +#include + +#include +#include + +#include + +namespace +{ + +template +armnn::INetworkPtr CreateSubtractionNetwork(const armnn::TensorShape& inputXShape, + const armnn::TensorShape& inputYShape, + const armnn::TensorShape& outputShape, + const float qScale = 1.0f, + const int32_t qOffset = 0) +{ + using namespace armnn; + + INetworkPtr network(INetwork::Create()); + + TensorInfo inputXTensorInfo(inputXShape, DataType, qScale, qOffset, true); + TensorInfo inputYTensorInfo(inputYShape, DataType, qScale, qOffset, true); + + TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset); + + + IConnectableLayer* subtraction = network->AddSubtractionLayer("subtraction"); + IConnectableLayer* inputX = network->AddInputLayer(0, "inputX"); + IConnectableLayer* inputY = network->AddInputLayer(1, "inputY"); + IConnectableLayer* output = network->AddOutputLayer(0, "output"); + + Connect(inputX, subtraction, inputXTensorInfo, 0, 0); + Connect(inputY, subtraction, inputYTensorInfo, 0, 1); + Connect(subtraction, output, outputTensorInfo, 0, 0); + + return network; +} + +template> +void SubtractionEndToEnd(const std::vector& backends) +{ + using namespace armnn; + + const TensorShape& inputXShape = { 2, 2 }; + const TensorShape& inputYShape = { 2, 2 }; + const TensorShape& outputShape = { 2, 2 }; + + INetworkPtr network = CreateSubtractionNetwork(inputXShape, inputYShape, outputShape); + + CHECK(network); + + std::vector inputXData{ 10, 11, 12, 13 }; + std::vector inputYData{ 5, 7, 6, 8 }; + std::vector expectedOutput{ 5, 4, 6, 5 }; + + std::map> inputTensorData = {{ 0, inputXData }, {1, inputYData}}; + std::map> expectedOutputData = { { 0, expectedOutput } }; + + EndToEndLayerTestImpl(std::move(network), inputTensorData, expectedOutputData, backends); +} + +template +void SubtractionEndToEndFloat16(const std::vector& backends) +{ + using namespace armnn; + using namespace half_float::literal; + using Half = half_float::half; + + const TensorShape& inputXShape = { 2, 2 }; + const TensorShape& inputYShape = { 2, 2 }; + const TensorShape& outputShape = { 2, 2 }; + + INetworkPtr network = CreateSubtractionNetwork(inputXShape, inputYShape, outputShape); + CHECK(network); + + std::vector inputXData{ 11._h, 12._h, + 13._h, 14._h }; + std::vector inputYData{ 5._h, 7._h, + 6._h, 8._h }; + std::vector expectedOutput{ 6._h, 5._h, + 7._h, 6._h }; + + std::map> inputTensorData = {{ 0, inputXData }, { 1, inputYData }}; + std::map> expectedOutputData = { { 0, expectedOutput } }; + + EndToEndLayerTestImpl(std::move(network), inputTensorData, expectedOutputData, backends); +} + +} // anonymous namespace diff --git a/src/backends/tosaCommon/TosaMappings.cpp b/src/backends/tosaCommon/TosaMappings.cpp index 1452e4aefd..b0f8fd9c2d 100644 --- a/src/backends/tosaCommon/TosaMappings.cpp +++ b/src/backends/tosaCommon/TosaMappings.cpp @@ -24,8 +24,10 @@ TosaSerializationBasicBlock* GetTosaMapping(const Layer* layer, switch (type) { case LayerType::Addition: + case LayerType::Multiplication: + case LayerType::Subtraction: { - return ConvertAdditionToTosaOperator(layer, inputs, outputs); + return ConvertElementwiseBinaryToTosaOperator(layer, type, inputs, outputs); } case LayerType::Concat: { diff --git a/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp b/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp deleted file mode 100644 index 7014886d92..0000000000 --- a/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp +++ /dev/null @@ -1,72 
+0,0 @@ -// -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "AdditionOperator.hpp" - -TosaSerializationBasicBlock* ConvertAdditionToTosaOperator(const Layer* layer, - const std::vector& inputs, - const std::vector& outputs) -{ - std::string input0Name = std::string("input0_"); - std::string input1Name = std::string("input1_"); - std::string outputName = std::string("output0_"); - std::string blockName = std::string("Op_ADD_block_") + GetUniqueTosaMappingID(); - - // If a layer is present then the block will be used for execution, so input and output names need to be determined - // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter. - if(layer != nullptr) - { - // Get the layers connected to the input slots and determine unique tensors names. - Layer& connectedLayer0 = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer(); - input0Name = GenerateUniqueName(connectedLayer0, 0); - - Layer& connectedLayer1 = layer->GetInputSlot(1).GetConnectedOutputSlot()->GetOwningLayer(); - input1Name = GenerateUniqueName(connectedLayer1, 1); - - // Determine unique output tensor name. - outputName = GenerateUniqueOutputName(*layer, 0); - } - - auto* op = new TosaSerializationOperator(Op_ADD, - Attribute_NONE, - nullptr, - {input0Name, input1Name}, - {outputName}); - - - std::vector tensors; - - // Only add input tensors if connected layer is an input layer. - // As intermediate or constant tensors will be created separately. - // There also can't be duplicate tensor. - if(input0Name.find("input0_") != std::string::npos) - { - std::vector inputShape0 = GetTosaTensorShape(inputs[0]->GetShape()); - DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType()); - - tensors.push_back(new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {})); - } - - if(input1Name.find("input1_") != std::string::npos) - { - std::vector inputShape1 = GetTosaTensorShape(inputs[1]->GetShape()); - DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType()); - - tensors.push_back(new TosaSerializationTensor(input1Name, inputShape1, inputDType1, {})); - } - - std::vector outputShape0 = GetTosaTensorShape(outputs[0]->GetShape()); - DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType()); - - tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {})); - - // operatorInputNames/operatorOutputNames ends up being the same as - // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings - return new TosaSerializationBasicBlock(blockName, // name - {op}, // operators - tensors, // tensors - {input0Name, input1Name}, // inputs - {outputName}); // outputs -} \ No newline at end of file diff --git a/src/backends/tosaCommon/operatorMappings/AdditionOperator.hpp b/src/backends/tosaCommon/operatorMappings/AdditionOperator.hpp deleted file mode 100644 index 5eb7441531..0000000000 --- a/src/backends/tosaCommon/operatorMappings/AdditionOperator.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#pragma once - -#include "TosaOperatorUtils.hpp" - -#include - -#include - -using namespace armnn; -using namespace tosa; - -TosaSerializationBasicBlock* ConvertAdditionToTosaOperator(const Layer* layer, - const std::vector& inputs, - const std::vector& outputs); - diff --git a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt index 2443dc0585..6e897aaa8c 100644 --- a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt +++ b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt @@ -4,8 +4,6 @@ # list(APPEND armnnTosaBackendOperators_sources - AdditionOperator.hpp - AdditionOperator.cpp AvgPool2DIgnoreValueOperator.hpp AvgPool2DIgnoreValueOperator.cpp ConcatOperator.hpp @@ -14,6 +12,8 @@ list(APPEND armnnTosaBackendOperators_sources ConstantOperator.cpp Conv2dOperator.hpp Conv2dOperator.cpp + ElementwiseBinaryOperator.hpp + ElementwiseBinaryOperator.cpp Pooling2DOperator.hpp Pooling2DOperator.cpp ReshapeOperator.hpp diff --git a/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp new file mode 100644 index 0000000000..9909e66a7d --- /dev/null +++ b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp @@ -0,0 +1,103 @@ +// +// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ElementwiseBinaryOperator.hpp" + +TosaSerializationBasicBlock* ConvertElementwiseBinaryToTosaOperator(const Layer* layer, + const LayerType type, + const std::vector& inputs, + const std::vector& outputs) +{ + std::string input0Name = std::string("input0_"); + std::string input1Name = std::string("input1_"); + std::string outputName = std::string("output0_"); + std::string blockName; + + // If a layer is present then the block will be used for execution, so input and output names need to be determined + // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter. + if(layer != nullptr) + { + // Get the layers connected to the input slots and determine unique tensor names. + Layer& connectedLayer0 = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer(); + input0Name = GenerateUniqueName(connectedLayer0, 0); + + Layer& connectedLayer1 = layer->GetInputSlot(1).GetConnectedOutputSlot()->GetOwningLayer(); + input1Name = GenerateUniqueName(connectedLayer1, 1); + + // Determine unique output tensor name. 
+        outputName = GenerateUniqueOutputName(*layer, 0);
+    }
+
+    TosaSerializationOperator* op = nullptr;
+    switch(type)
+    {
+        case LayerType::Addition:
+        {
+            op = new TosaSerializationOperator(Op_ADD,
+                                               Attribute_NONE,
+                                               nullptr,
+                                               {input0Name, input1Name},
+                                               {outputName});
+            blockName = std::string("Op_ADD_block_") + GetUniqueTosaMappingID();
+            break;
+        }
+        case LayerType::Multiplication:
+        {
+            int32_t shift = 0;
+            TosaMulAttribute mulAttribute(shift);
+            op = new TosaSerializationOperator(Op_MUL,
+                                               Attribute_MulAttribute,
+                                               &mulAttribute,
+                                               {input0Name, input1Name},
+                                               {outputName});
+            blockName = std::string("Op_MUL_block_") + GetUniqueTosaMappingID();
+            break;
+        }
+        case LayerType::Subtraction:
+        {
+            op = new TosaSerializationOperator(Op_SUB,
+                                               Attribute_NONE,
+                                               nullptr,
+                                               {input0Name, input1Name},
+                                               {outputName});
+            blockName = std::string("Op_SUB_block_") + GetUniqueTosaMappingID();
+            break;
+        }
+        default:
+            throw armnn::Exception("ConvertElementwiseBinaryToTosaOperator: Unsupported layer type.");
+    }
+    ARMNN_ASSERT(op != nullptr);
+
+    std::vector<TosaSerializationTensor*> tensors;
+    // Only add input tensors if connected layer is an input layer.
+    // As intermediate or constant tensors will be created separately.
+    // There also can't be duplicate tensors.
+    if(input0Name.find("input0_") != std::string::npos)
+    {
+        std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
+        DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());
+        tensors.push_back(new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {}));
+    }
+    if(input1Name.find("input1_") != std::string::npos)
+    {
+        std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
+        DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType());
+        tensors.push_back(new TosaSerializationTensor(input1Name, inputShape1, inputDType1, {}));
+    }
+
+    std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
+    DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());
+
+    tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));
+
+    // operatorInputNames/operatorOutputNames ends up being the same as
+    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
+    return new TosaSerializationBasicBlock(blockName, // name
+                                           {op}, // operators
+                                           tensors, // tensors
+                                           {input0Name, input1Name}, // inputs
+                                           {outputName}); // outputs
+}
+
diff --git a/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.hpp b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.hpp
new file mode 100644
index 0000000000..86031c6e06
--- /dev/null
+++ b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "TosaOperatorUtils.hpp" + +#include + +#include + +using namespace armnn; +using namespace tosa; + +TosaSerializationBasicBlock* ConvertElementwiseBinaryToTosaOperator(const Layer* layer, + const LayerType type, + const std::vector& inputs, + const std::vector& outputs); \ No newline at end of file diff --git a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp index 052c54c3af..7b117d8443 100644 --- a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp +++ b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp @@ -5,12 +5,12 @@ #pragma once -#include "AdditionOperator.hpp" +#include "AvgPool2DIgnoreValueOperator.hpp" #include "ConcatOperator.hpp" #include "ConstantOperator.hpp" #include "Conv2dOperator.hpp" -#include "AvgPool2DIgnoreValueOperator.hpp" +#include "ElementwiseBinaryOperator.hpp" #include "Pooling2DOperator.hpp" #include "ReshapeOperator.hpp" #include "SliceOperator.hpp" -#include "TransposeConv2dOperator.hpp" \ No newline at end of file +#include "TransposeConv2dOperator.hpp" diff --git a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp index b3ab14a774..146a9cba37 100644 --- a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp +++ b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp @@ -253,6 +253,54 @@ TEST_CASE("GetTosaMappingFromLayer_Conv2dLayer") basicBlock, inputShape, outputShape, Op_CONV2D, Attribute_ConvAttribute, descriptor, LayerType::Convolution2d); } +TEST_CASE("GetTosaMapping_MultiplicationLayer") +{ + + const TensorInfo input0Info ({ 1, 2, 4, 2 }, DataType::Float32); + const TensorInfo input1Info ({ 1, 2, 4, 2 }, DataType::Float32); + const TensorInfo outputInfo ({ 1, 2, 4, 2 }, DataType::Float32); + + std::vector> inputShape = {{ 1, 2, 4, 2 }, { 1, 2, 4, 2 }}; + std::vector> outputShape = {{ 1, 2, 4, 2 }}; + + TosaSerializationBasicBlock* basicBlock = + GetTosaMapping(nullptr, LayerType::Multiplication, {&input0Info, &input1Info}, {&outputInfo}, BaseDescriptor()); + AssertTosaOneToOneMappingBasicBlock( basicBlock, inputShape, outputShape, + tosa::Op_MUL, tosa::Attribute_MulAttribute, BaseDescriptor(), LayerType::Multiplication); +} + +TEST_CASE("GetTosaMappingFromLayer_MultiplicationLayer") +{ + IRuntime::CreationOptions options; + IRuntimePtr runtime(IRuntime::Create(options)); + + // Builds up the structure of the network. 
+ INetworkPtr net(INetwork::Create()); + + IConnectableLayer* input0 = net->AddInputLayer(0, "input0"); + IConnectableLayer* input1 = net->AddInputLayer(1, "input1"); + IConnectableLayer* add = net->AddMultiplicationLayer("multiplication"); + IConnectableLayer* output = net->AddOutputLayer(0, "output"); + + input0->GetOutputSlot(0).Connect(add->GetInputSlot(0)); + input1->GetOutputSlot(0).Connect(add->GetInputSlot(1)); + add->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + + TensorInfo info = TensorInfo({ 2, 2 }, DataType::Float32, 0.0f, 0, true); + + input0->GetOutputSlot(0).SetTensorInfo(info); + input1->GetOutputSlot(0).SetTensorInfo(info); + add->GetOutputSlot(0).SetTensorInfo(info); + + std::vector> inputShape = {{ 2, 2 }, { 2, 2 }}; + std::vector> outputShape = {{ 2, 2 }}; + + TosaSerializationBasicBlock* basicBlock = + GetTosaMappingFromLayer(PolymorphicDowncast(add)); + AssertTosaOneToOneMappingBasicBlock( basicBlock, inputShape, outputShape, + tosa::Op_MUL, Attribute_MulAttribute, BaseDescriptor(), LayerType::Multiplication); +} + TEST_CASE("GetTosaMapping_AvgPool2DLayer") { Pooling2dDescriptor descriptor; diff --git a/src/backends/tosaReference/TosaRefLayerSupport.cpp b/src/backends/tosaReference/TosaRefLayerSupport.cpp index 0d0d07a783..b37ecc4ab7 100644 --- a/src/backends/tosaReference/TosaRefLayerSupport.cpp +++ b/src/backends/tosaReference/TosaRefLayerSupport.cpp @@ -38,6 +38,8 @@ bool TosaRefLayerSupport::IsLayerSupported(const LayerType& type, case LayerType::Output: return true; case LayerType::Addition: + case LayerType::Multiplication: + case LayerType::Subtraction: // Setup inputs and outputs inputInfos.push_back(&infos[0]); inputInfos.push_back(&infos[1]); diff --git a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp index a377293fbf..67b87ae8b9 100644 --- a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp +++ b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp @@ -8,9 +8,11 @@ #include "backendsCommon/test/AdditionEndToEndTestImpl.hpp" #include "backendsCommon/test/Convolution2dEndToEndTestImpl.hpp" #include "backendsCommon/test/ConcatEndToEndTestImpl.hpp" +#include "backendsCommon/test/MultiplicationEndToEndTestImpl.hpp" #include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp" #include "backendsCommon/test/ReshapeEndToEndTestImpl.hpp" #include "backendsCommon/test/SliceEndToEndTestImpl.hpp" +#include "backendsCommon/test/SubtractionEndToEndTestImpl.hpp" #include "backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp" #include @@ -150,6 +152,35 @@ TEST_CASE("TosaRefSliceEndtoEndTestFloat16") { SliceEndToEndFloat16(tosaDefaultBackends); } +TEST_CASE("TosaRefSubtractionEndtoEndTestFloat32") +{ + SubtractionEndToEnd(tosaDefaultBackends); +} + +TEST_CASE("TosaRefSubtractionEndtoEndTestInt32") +{ + SubtractionEndToEnd(tosaDefaultBackends); +} + +TEST_CASE("TosaRefSubtractionEndtoEndTestFloat16") +{ + SubtractionEndToEndFloat16(tosaDefaultBackends); +} + +TEST_CASE("TosaRefMultiplicationEndtoEndTestFloat32") +{ + MultiplicationEndToEnd(tosaDefaultBackends); +} + +TEST_CASE("TosaRefMultiplicationEndtoEndTestInt32") +{ + MultiplicationEndToEnd(tosaDefaultBackends); +} + +TEST_CASE("TosaRefMultiplicationEndtoEndTestFloat16") +{ + MultiplicationEndToEndFloat16(tosaDefaultBackends); +} // TransposeConvolution2d TEST_CASE("TosaRefTransposeConvolution2dEndToEndFloatNhwcTest") diff --git a/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp 
b/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp index 051965f541..9119b13557 100644 --- a/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp +++ b/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp @@ -190,6 +190,50 @@ TEST_CASE("IsLayerSupportedTosaReferenceConv2dUnsupported") CHECK(!supported); } +TEST_CASE("IsLayerSupportedTosaReferenceMultiplication") +{ + TensorShape shape0 = {1,1,3,4}; + TensorShape shape1 = {1,1,3,4}; + TensorShape outShape = {1,1,3,4}; + TensorInfo in0(shape0, armnn::DataType::Float32); + TensorInfo in1(shape1, armnn::DataType::Float32); + TensorInfo out(outShape, armnn::DataType::Float32); + + BaseDescriptor desc; + TosaRefLayerSupport supportChecker; + std::string reasonIfNotSupported; + auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Multiplication, + {in0, in1, out}, + desc, + armnn::EmptyOptional(), + armnn::EmptyOptional(), + reasonIfNotSupported); + + CHECK(supported); +} + +TEST_CASE("IsLayerSupportedTosaReferenceMultiplicationUnsupported") +{ + TensorShape shape0 = {1,1,3,4}; + TensorShape shape1 = {1,2,3,4}; + TensorShape outShape = {1,1,3,4}; + TensorInfo in0(shape0, armnn::DataType::Signed64); + TensorInfo in1(shape1, armnn::DataType::Signed64); + TensorInfo out(outShape, armnn::DataType::Signed64); + + BaseDescriptor desc; + TosaRefLayerSupport supportChecker; + std::string reasonIfNotSupported; + auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Multiplication, + {in0, in1, out}, + desc, + armnn::EmptyOptional(), + armnn::EmptyOptional(), + reasonIfNotSupported); + + CHECK(!supported); +} + TEST_CASE("IsLayerSupportedTosaReferenceMaxPooling2d") { TensorShape inShape = {1,1,3,4}; @@ -376,6 +420,50 @@ TEST_CASE("IsLayerSupportedTosaReferenceSliceUnsupported") CHECK(!supported); } +TEST_CASE("IsLayerSupportedTosaReferenceSubtraction") +{ + TensorShape shape0 = {1,1,3,4}; + TensorShape shape1 = {1,1,3,4}; + TensorShape outShape = {1,1,3,4}; + TensorInfo in0(shape0, armnn::DataType::Float32); + TensorInfo in1(shape1, armnn::DataType::Float32); + TensorInfo out(outShape, armnn::DataType::Float32); + + BaseDescriptor desc; + TosaRefLayerSupport supportChecker; + std::string reasonIfNotSupported; + auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Subtraction, + {in0, in1, out}, + desc, + armnn::EmptyOptional(), + armnn::EmptyOptional(), + reasonIfNotSupported); + + CHECK(supported); +} + +TEST_CASE("IsLayerSupportedTosaReferenceSubtractionUnsupported") +{ + TensorShape shape0 = {1,1,3,4}; + TensorShape shape1 = {4}; + TensorShape outShape = {1,1,3,4}; + TensorInfo in0(shape0, armnn::DataType::Signed64); + TensorInfo in1(shape1, armnn::DataType::Signed64); + TensorInfo out(outShape, armnn::DataType::Signed64); + + BaseDescriptor desc; + TosaRefLayerSupport supportChecker; + std::string reasonIfNotSupported; + auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Subtraction, + {in0, in1, out}, + desc, + armnn::EmptyOptional(), + armnn::EmptyOptional(), + reasonIfNotSupported); + + CHECK(!supported); +} + TEST_CASE("IsLayerSupportedTosaReferenceTransposeConv2d") { TensorInfo inputInfo ({ 1, 3, 3, 1 }, DataType::Float32); -- cgit v1.2.1
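
Notes on the key declarations in this patch. The template and angle-bracket text in the new headers is easy to misread in patch form, so the sketch below shows how the main signatures most plausibly read; it is a reconstruction based on the call sites above and the pattern of the existing AdditionEndToEndTestImpl.hpp, not verbatim from the patch, and the parameter names are assumptions.

    // Reconstructed for readability; names and exact types are assumed.
    TosaSerializationBasicBlock* ConvertElementwiseBinaryToTosaOperator(
        const armnn::Layer* layer,
        const armnn::LayerType type,
        const std::vector<const armnn::TensorInfo*>& inputs,
        const std::vector<const armnn::TensorInfo*>& outputs);

    // Network builder shared by both new end-to-end test headers.
    template <armnn::DataType DataType>
    armnn::INetworkPtr CreateSubtractionNetwork(const armnn::TensorShape& inputXShape,
                                                const armnn::TensorShape& inputYShape,
                                                const armnn::TensorShape& outputShape,
                                                const float qScale = 1.0f,
                                                const int32_t qOffset = 0);

    // Test driver; T resolves to the C++ type backing the ArmNN data type.
    template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
    void SubtractionEndToEnd(const std::vector<armnn::BackendId>& backends);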
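
On the Multiplication mapping: the Op_MUL case above passes a TosaMulAttribute with shift = 0. In TOSA, MUL on integer operands scales the product down by 2^shift, so shift = 0 keeps INT32 multiplication exact and is the natural choice for floating-point inputs as well. A small self-contained sketch of that relationship, simplified in that it ignores the spec's rounding behaviour for non-zero shifts:

    #include <cassert>
    #include <cstdint>

    // Simplified model of TOSA MUL on int32: the widened product is right-shifted by 'shift'.
    int32_t TosaMulModel(int32_t a, int32_t b, int32_t shift)
    {
        int64_t product = static_cast<int64_t>(a) * static_cast<int64_t>(b);
        return static_cast<int32_t>(product >> shift);
    }

    int main()
    {
        assert(TosaMulModel(3, 4, 0) == 12); // shift = 0, as used by the mapping above
        assert(TosaMulModel(3, 4, 1) == 6);  // a non-zero shift halves the product
        return 0;
    }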
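
On the end-to-end tests: the new TosaRefEndToEndTests cases instantiate the shared helpers once per data type. A sketch of how another backend's test file could reuse the same headers; the explicit template arguments and the "TosaRef" backend id are inferred from the test names rather than copied from the patch, and the test names here are placeholders.

    #include "backendsCommon/test/MultiplicationEndToEndTestImpl.hpp"
    #include "backendsCommon/test/SubtractionEndToEndTestImpl.hpp"

    #include <doctest/doctest.h>

    namespace
    {
    // Backend(s) the tests should run against; "TosaRef" is assumed here.
    std::vector<armnn::BackendId> testBackends = { "TosaRef" };
    }

    TEST_CASE("ExampleSubtractionEndToEndFloat32")
    {
        SubtractionEndToEnd<armnn::DataType::Float32>(testBackends);
    }

    TEST_CASE("ExampleMultiplicationEndToEndSigned32")
    {
        // The "Int32" cases above correspond to armnn::DataType::Signed32.
        MultiplicationEndToEnd<armnn::DataType::Signed32>(testBackends);
    }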
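
Beyond the unit tests, the practical effect of this change is that a plain ArmNN application can now run element-wise Subtraction and Multiplication through the TOSA reference backend. The following is a minimal sketch using the public ArmNN API, mirroring the 2x2 subtraction case from the tests above; the "TosaRef" backend id and the use of AddSubtractionLayer are assumptions based on those tests, and error handling is omitted.

    #include <armnn/ArmNN.hpp>

    #include <iostream>
    #include <vector>

    int main()
    {
        using namespace armnn;

        // Build a small graph: two 2x2 Float32 inputs feeding a Subtraction layer.
        INetworkPtr net = INetwork::Create();
        IConnectableLayer* in0 = net->AddInputLayer(0, "input0");
        IConnectableLayer* in1 = net->AddInputLayer(1, "input1");
        IConnectableLayer* sub = net->AddSubtractionLayer("subtraction");
        IConnectableLayer* out = net->AddOutputLayer(0, "output");

        in0->GetOutputSlot(0).Connect(sub->GetInputSlot(0));
        in1->GetOutputSlot(0).Connect(sub->GetInputSlot(1));
        sub->GetOutputSlot(0).Connect(out->GetInputSlot(0));

        TensorInfo info({ 2, 2 }, DataType::Float32, 0.0f, 0, true);
        in0->GetOutputSlot(0).SetTensorInfo(info);
        in1->GetOutputSlot(0).SetTensorInfo(info);
        sub->GetOutputSlot(0).SetTensorInfo(info);

        // Optimise for the TOSA reference backend (assumed id "TosaRef") and load it.
        IRuntime::CreationOptions options;
        IRuntimePtr runtime = IRuntime::Create(options);
        IOptimizedNetworkPtr optNet = Optimize(*net, { "TosaRef" }, runtime->GetDeviceSpec());

        NetworkId netId;
        runtime->LoadNetwork(netId, std::move(optNet));

        std::vector<float> x{ 10.f, 11.f, 12.f, 13.f };
        std::vector<float> y{ 5.f, 7.f, 6.f, 8.f };
        std::vector<float> result(4);

        // Input ConstTensors must be marked constant in recent ArmNN releases.
        TensorInfo inputInfo = runtime->GetInputTensorInfo(netId, 0);
        inputInfo.SetConstant(true);

        InputTensors inputTensors{ { 0, ConstTensor(inputInfo, x.data()) },
                                   { 1, ConstTensor(inputInfo, y.data()) } };
        OutputTensors outputTensors{ { 0, Tensor(runtime->GetOutputTensorInfo(netId, 0), result.data()) } };

        runtime->EnqueueWorkload(netId, inputTensors, outputTensors); // expect { 5, 4, 6, 5 }

        for (float v : result) { std::cout << v << " "; }
        std::cout << std::endl;
        return 0;
    }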