From b30e6554ad41f21c8326e387aa2c1f8a5d4e6445 Mon Sep 17 00:00:00 2001 From: Cathal Corbett Date: Wed, 7 Dec 2022 11:50:50 +0000 Subject: IVGCVSW-7174 Add Reshape support to TOSA Reference Backend * Spelling corrections and code refactors added to TosaCommon * TosaDTypeToString() implemented and used in TosaRef IsLayerSupported() instead of enum integer. * Using namespace armnn in TosaCommon OneToOneMappingTests and TosaReference TosaRefLayerSupportTests instead of armnn::ClassName. * Updated VerifyTosaAttribute() to also verify certain attributes from input and output shapes. Signed-off-by: Cathal Corbett Change-Id: I71dfca404d081a665f748ab724153c6dc36b7eca --- src/backends/backendsCommon/test/CMakeLists.txt | 1 + .../test/ReshapeEndToEndTestImpl.hpp | 91 +++++++ src/backends/tosaCommon/TosaMappings.cpp | 5 + .../operatorMappings/AdditionOperator.cpp | 2 +- .../AvgPool2DIgnoreValueOperator.cpp | 2 +- .../tosaCommon/operatorMappings/CMakeLists.txt | 2 + .../operatorMappings/Pooling2DOperator.cpp | 2 +- .../operatorMappings/ReshapeOperator.cpp | 54 +++++ .../operatorMappings/ReshapeOperator.hpp | 20 ++ .../operatorMappings/TosaCommonOperators.hpp | 3 +- .../operatorMappings/TosaOperatorUtils.hpp | 57 +++-- .../test/AvgPool2DIgnoreValueChecker.hpp | 18 +- .../tosaCommon/test/OneToOneMappingTests.cpp | 146 +++++++---- src/backends/tosaCommon/test/TosaTestUtils.hpp | 52 +++- src/backends/tosaReference/TosaRefLayerSupport.cpp | 27 ++- .../tosaReference/test/TosaRefEndToEndTests.cpp | 17 ++ .../test/TosaRefLayerSupportTests.cpp | 266 +++++++++++++-------- 17 files changed, 579 insertions(+), 186 deletions(-) create mode 100644 src/backends/backendsCommon/test/ReshapeEndToEndTestImpl.hpp create mode 100644 src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp create mode 100644 src/backends/tosaCommon/operatorMappings/ReshapeOperator.hpp (limited to 'src') diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt index d833caa3fe..881e4d6e18 100644 --- a/src/backends/backendsCommon/test/CMakeLists.txt +++ b/src/backends/backendsCommon/test/CMakeLists.txt @@ -49,6 +49,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources QuantizedLstmEndToEndTestImpl.cpp QuantizedLstmEndToEndTestImpl.hpp RankEndToEndTestImpl.hpp + ReshapeEndToEndTestImpl.hpp ResizeEndToEndTestImpl.hpp RuntimeTestImpl.hpp SpaceToDepthEndToEndTestImpl.cpp diff --git a/src/backends/backendsCommon/test/ReshapeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ReshapeEndToEndTestImpl.hpp new file mode 100644 index 0000000000..4cefb6d352 --- /dev/null +++ b/src/backends/backendsCommon/test/ReshapeEndToEndTestImpl.hpp @@ -0,0 +1,91 @@ +// +// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// +#pragma once + +#include + +#include +#include + +#include + +namespace +{ + +template +armnn::INetworkPtr CreateReshapeNetwork(const armnn::TensorShape& inputShape, + const armnn::TensorShape& outputShape, + const armnn::ReshapeDescriptor& descriptor, + const float qScale = 1.0f, + const int32_t qOffset = 0) +{ + using namespace armnn; + + INetworkPtr network(INetwork::Create()); + + TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true); + TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset); + + + IConnectableLayer* reshape = network->AddReshapeLayer(descriptor, "reshape"); + IConnectableLayer* input = network->AddInputLayer(0, "input"); + IConnectableLayer* output = network->AddOutputLayer(0, "output"); + + Connect(input, reshape, inputTensorInfo, 0, 0); + Connect(reshape, output, outputTensorInfo, 0, 0); + + return network; +} + +template> +void ReshapeEndToEnd(const std::vector& backends) +{ + using namespace armnn; + + const TensorShape& inputShape = { 2, 3 }; + const TensorShape& outputShape = { 6 }; + + ReshapeDescriptor descriptor; + descriptor.m_TargetShape = outputShape; + + INetworkPtr network = CreateReshapeNetwork(inputShape, outputShape, descriptor); + + CHECK(network); + + std::vector data{ 1, 2, 3, + 4, 5, 6 }; + + std::map> inputTensorData = { { 0, data } }; + std::map> expectedOutputData = { { 0, data } }; + + EndToEndLayerTestImpl(std::move(network), inputTensorData, expectedOutputData, backends); +} + +template +void ReshapeEndToEndFloat16(const std::vector& backends) +{ + using namespace armnn; + using namespace half_float::literal; + using Half = half_float::half; + + const TensorShape& inputShape = { 2, 3 }; + const TensorShape& outputShape = { 6 }; + + ReshapeDescriptor descriptor; + descriptor.m_TargetShape = outputShape; + + INetworkPtr network = CreateReshapeNetwork(inputShape, outputShape, descriptor); + CHECK(network); + + std::vector data{ 1._h, 2._h, 3._h, + 4._h, 5._h, 6._h }; + + std::map> inputTensorData = { { 0, data } }; + std::map> expectedOutputData = { { 0, data } }; + + EndToEndLayerTestImpl(std::move(network), inputTensorData, expectedOutputData, backends); +} + +} // anonymous namespace diff --git a/src/backends/tosaCommon/TosaMappings.cpp b/src/backends/tosaCommon/TosaMappings.cpp index 00ba429555..318735db77 100644 --- a/src/backends/tosaCommon/TosaMappings.cpp +++ b/src/backends/tosaCommon/TosaMappings.cpp @@ -57,6 +57,11 @@ TosaSerializationBasicBlock* GetTosaMapping(const Layer* layer, return ConvertPooling2DToTosaOperator(layer, inputs, outputs, poolDesc); } } + case LayerType::Reshape: + { + auto reshapeDesc = PolymorphicDowncast(&descriptor); + return ConvertReshapeToTosaOperator(layer, inputs, outputs, reshapeDesc); + } default: { return CreateEmptyTosaSerializationBasicBlock(); diff --git a/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp b/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp index 66ca869ac4..f1fb34c5e2 100644 --- a/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp +++ b/src/backends/tosaCommon/operatorMappings/AdditionOperator.cpp @@ -50,7 +50,7 @@ TosaSerializationBasicBlock* ConvertAdditionToTosaOperator(const Layer* layer, auto* outputTensor0 = new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}); // operatorInputNames/operatorOutputNames ends up being the same as - // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings + // blockInputNames/blockOutputNames for one-to-one 
ArmNN to TOSA mappings return new TosaSerializationBasicBlock(blockName, // name {op}, // operators {inputTensor0, inputTensor1, outputTensor0}, // tensors diff --git a/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp b/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp index 2601a6243d..7e7631dcef 100644 --- a/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp +++ b/src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp @@ -101,7 +101,7 @@ TosaSerializationBasicBlock* ConvertAvgPool2DIgnoreValueToTosaOperator(const Lay auto* outputTensor = new TosaSerializationTensor(poolOutputName, outputShape, outputDType, {}); // operatorInputNames/operatorOutputNames ends up being the same as - // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings + // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings return new TosaSerializationBasicBlock(blockName, // name {opPad, opPool}, // operators {inputTensor, intermediateTensor, outputTensor}, // tensors diff --git a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt index b256eddda1..7733d01abb 100644 --- a/src/backends/tosaCommon/operatorMappings/CMakeLists.txt +++ b/src/backends/tosaCommon/operatorMappings/CMakeLists.txt @@ -14,6 +14,8 @@ list(APPEND armnnTosaBackendOperators_sources Conv2dOperator.cpp Pooling2DOperator.hpp Pooling2DOperator.cpp + ReshapeOperator.hpp + ReshapeOperator.cpp TosaOperatorUtils.hpp ) diff --git a/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp b/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp index eaeb8a4cde..265901e1ae 100644 --- a/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp +++ b/src/backends/tosaCommon/operatorMappings/Pooling2DOperator.cpp @@ -56,7 +56,7 @@ TosaSerializationBasicBlock* ConvertPooling2DToTosaOperator(const Layer* layer, auto* outputTensor0 = new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}); // operatorInputNames/operatorOutputNames ends up being the same as - // blockInputNames/blockOutputNames for one-to-one ArmNN to Tosa mappings + // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings return new TosaSerializationBasicBlock(blockName, // name {op}, // operators {inputTensor0, outputTensor0}, // tensors diff --git a/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp b/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp new file mode 100644 index 0000000000..b88a6ef894 --- /dev/null +++ b/src/backends/tosaCommon/operatorMappings/ReshapeOperator.cpp @@ -0,0 +1,54 @@ +// +// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ReshapeOperator.hpp" + +TosaSerializationBasicBlock* ConvertReshapeToTosaOperator(const Layer* layer, + const std::vector& inputs, + const std::vector& outputs, + const ReshapeDescriptor* reshapeDescriptor) +{ + std::string inputName = std::string("input0_"); + std::string outputName = std::string("output0_"); + std::string blockName = std::string("Op_RESHAPE_block_") + GetUniqueTosaMappingID(); + + // If a layer is present then the block will be used for execution, so input and output names need to be determined + // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter. 
+ if(layer != nullptr) + { + // Get the layers connected to the input slots and determine unique layer names. + Layer& connectedLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer(); + inputName = GenerateUniqueName(connectedLayer, 0); + + // Get the layer connected to the output slot and determine unique layer name. + Layer& connectedOutputLayer = layer->GetOutputSlot().GetConnection(0)->GetOwningLayer(); + outputName = GenerateUniqueName(connectedOutputLayer, 0); + } + + TosaReshapeAttribute attribute(GetTosaTensorShape(reshapeDescriptor->m_TargetShape)); + + auto* op = new TosaSerializationOperator(Op_RESHAPE, + Attribute_ReshapeAttribute, + &attribute, + {inputName}, + {outputName}); + + std::vector inputShape = GetTosaTensorShape(inputs[0]->GetShape()); + DType inputDType = ArmNNToDType(inputs[0]->GetDataType()); + + std::vector outputShape = GetTosaTensorShape(outputs[0]->GetShape()); + DType outputDType = ArmNNToDType(outputs[0]->GetDataType()); + + auto* inputTensor = new TosaSerializationTensor(inputName, inputShape, inputDType, {}); + auto* outputTensor = new TosaSerializationTensor(outputName, outputShape, outputDType, {}); + + // operatorInputNames/operatorOutputNames ends up being the same as + // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings + return new TosaSerializationBasicBlock(blockName, // name + {op}, // operators + {inputTensor, outputTensor}, // tensors + {inputName}, // inputs + {outputName}); // outputs +} \ No newline at end of file diff --git a/src/backends/tosaCommon/operatorMappings/ReshapeOperator.hpp b/src/backends/tosaCommon/operatorMappings/ReshapeOperator.hpp new file mode 100644 index 0000000000..4f363df052 --- /dev/null +++ b/src/backends/tosaCommon/operatorMappings/ReshapeOperator.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "TosaOperatorUtils.hpp" + +#include + +#include + +using namespace armnn; +using namespace tosa; + +TosaSerializationBasicBlock* ConvertReshapeToTosaOperator(const Layer* layer, + const std::vector& inputs, + const std::vector& outputs, + const ReshapeDescriptor* reshapeDescriptor); diff --git a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp index 513db0c039..0711095a25 100644 --- a/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp +++ b/src/backends/tosaCommon/operatorMappings/TosaCommonOperators.hpp @@ -9,4 +9,5 @@ #include "ConstantOperator.hpp" #include "Conv2dOperator.hpp" #include "AvgPool2DIgnoreValueOperator.hpp" -#include "Pooling2DOperator.hpp" \ No newline at end of file +#include "Pooling2DOperator.hpp" +#include "ReshapeOperator.hpp" \ No newline at end of file diff --git a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp index 176e4e1cfb..288966badd 100644 --- a/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp +++ b/src/backends/tosaCommon/operatorMappings/TosaOperatorUtils.hpp @@ -59,28 +59,20 @@ inline std::vector GetTosaTensorShape(const TensorShape& shape) // Function that generates unique name using the layer type, input slot and layer guid. 
inline std::string GenerateUniqueName(const Layer& layer, uint32_t layerSlot) { - std::string name; std::string guid = std::to_string(layer.GetGuid()); std::string slotAndGuid = std::to_string(layerSlot) + "_" + guid; - LayerType layerType = layer.GetType(); - if (layerType == LayerType::Input) + switch (layer.GetType()) { - name = "input" + slotAndGuid; - } - else if (layerType == LayerType::Output) - { - name = "output" + slotAndGuid; - } - else if (layerType == LayerType::Constant) - { - name = "constant_" + guid; - } - else - { - name = "intermediate" + slotAndGuid; + case LayerType::Input: + return "input" + slotAndGuid; + case LayerType::Output: + return "output" + slotAndGuid; + case LayerType::Constant: + return "constant_" + guid; + default: + return "intermediate" + slotAndGuid; } - return name; } // Function to return unique int as a string to ensure uniqueness between all input, output and block names. @@ -90,6 +82,37 @@ inline std::string GetUniqueTosaMappingID() return std::to_string(++uniqueTosaMappingID); } +// Function to return Tosa DType as string. +inline std::string TosaDTypeToString(DType tosaDType) +{ + switch (tosaDType) + { + case DType_UNKNOWN: + return "DType_UNKNOWN"; + case DType_BOOL: + return "DType_BOOL"; + case DType_UINT8: + return "DType_UINT8"; + case DType_INT4: + return "DType_INT4"; + case DType_INT8: + return "DType_INT8"; + case DType_INT16: + return "DType_INT16"; + case DType_INT32: + return "DType_INT32"; + case DType_INT48: + return "DType_INT48"; + case DType_FP32: + return "DType_FP32"; + case DType_UINT16: + return "DType_UINT16"; + case DType_FP16: + return "DType_FP16"; + } + return ""; +} + // Function to return Tosa Op as string. inline std::string TosaOpToString(Op tosaOp) { diff --git a/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp b/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp index a38f66b466..6f57c4a61e 100644 --- a/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp +++ b/src/backends/tosaCommon/test/AvgPool2DIgnoreValueChecker.hpp @@ -68,9 +68,11 @@ void VerifyAvgPool2DIgnoreValue(TosaSerializationBasicBlock* basicBlock, CHECK(padOp->GetAttributeType() == Attribute_PadAttribute); CHECK(padOp->GetOp() == Op_PAD); - VerifyTosaAttributeFromDescriptor(descriptor, - padOp->GetAttribute(), - LayerType::Pooling2d); + VerifyTosaAttribute(descriptor, + padOp->GetAttribute(), + inputShape[0], + outputShape[0], + LayerType::Pooling2d); // // Verify average pool operator second. 
@@ -115,9 +117,11 @@ void VerifyAvgPool2DIgnoreValue(TosaSerializationBasicBlock* basicBlock, CHECK(poolOp->GetAttributeType() == Attribute_PoolAttribute); CHECK(poolOp->GetOp() == Op_AVG_POOL2D); - VerifyTosaAttributeFromDescriptor(descriptor, - poolOp->GetAttribute(), - LayerType::Pooling2d, - 1); + VerifyTosaAttribute(descriptor, + poolOp->GetAttribute(), + inputShape[0], + outputShape[0], + LayerType::Pooling2d, + 1); } \ No newline at end of file diff --git a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp index af9f9e26df..b1fa6847bc 100644 --- a/src/backends/tosaCommon/test/OneToOneMappingTests.cpp +++ b/src/backends/tosaCommon/test/OneToOneMappingTests.cpp @@ -79,7 +79,7 @@ TEST_CASE("GetTosaMappingFromLayer_ConstantLayer") std::vector> outputShape = {{ 1, 2, 4, 2 }}; std::vector data = GenerateRandomData(info.GetNumElements()); - armnn::ConstTensor constTensor(info, data); + ConstTensor constTensor(info, data); IConnectableLayer* constant = net->AddConstantLayer(constTensor, "constant"); IConnectableLayer* output = net->AddOutputLayer(0, "output"); @@ -95,7 +95,7 @@ TEST_CASE("GetTosaMappingFromLayer_ConstantLayer") TEST_CASE("GetTosaMapping_Conv2dLayer") { - armnn::Convolution2dDescriptor descriptor; + Convolution2dDescriptor descriptor; descriptor.m_PadLeft = 1; descriptor.m_PadRight = 1; descriptor.m_PadTop = 1; @@ -106,10 +106,10 @@ TEST_CASE("GetTosaMapping_Conv2dLayer") descriptor.m_DilationY = 2; descriptor.m_BiasEnabled = true; - const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32); - const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32); - const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true); - const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true); + const TensorInfo inputInfo ({ 1, 5, 5, 1 }, DataType::Float32); + const TensorInfo outputInfo({ 1, 3, 3, 1 }, DataType::Float32); + const TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true); + const TensorInfo biasesInfo ({ 1 }, DataType::Float32, 0.0f, 0, true); std::vector> inputShape = {{ 1, 5, 5, 1 }, { 1, 3, 3, 1 }, { 1 }}; std::vector> outputShape = {{ 1, 3, 3, 1 }}; @@ -131,7 +131,7 @@ TEST_CASE("GetTosaMappingFromLayer_Conv2dLayer") // Builds up the structure of the network. 
INetworkPtr net(INetwork::Create()); - armnn::Convolution2dDescriptor descriptor; + Convolution2dDescriptor descriptor; descriptor.m_PadLeft = 1; descriptor.m_PadRight = 1; descriptor.m_PadTop = 1; @@ -142,25 +142,25 @@ TEST_CASE("GetTosaMappingFromLayer_Conv2dLayer") descriptor.m_DilationY = 2; descriptor.m_BiasEnabled = true; - const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32); - const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32); - const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true); - const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true); + const TensorInfo inputInfo ({ 1, 5, 5, 1 }, DataType::Float32); + const TensorInfo outputInfo({ 1, 3, 3, 1 }, DataType::Float32); + const TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true); + const TensorInfo biasesInfo ({ 1 }, DataType::Float32, 0.0f, 0, true); std::vector> inputShape = {{ 1, 5, 5, 1 }}; std::vector> outputShape = {{ 1, 3, 3, 1 }}; std::vector weightsData = GenerateRandomData(weightsInfo.GetNumElements()); - armnn::ConstTensor weights(weightsInfo, weightsData); + ConstTensor weights(weightsInfo, weightsData); std::vector biasesData = GenerateRandomData(biasesInfo.GetNumElements()); - armnn::ConstTensor biases(biasesInfo, biasesData); + ConstTensor biases(biasesInfo, biasesData); - armnn::IConnectableLayer* const inputLayer = net->AddInputLayer(0, "input0"); - armnn::IConnectableLayer* const weightsLayer = net->AddConstantLayer(weights, "weights"); - armnn::IConnectableLayer* const biasesLayer = net->AddConstantLayer(biases, "biases"); - armnn::IConnectableLayer* const convLayer = net->AddConvolution2dLayer(descriptor, "conv2d"); - armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0); + IConnectableLayer* const inputLayer = net->AddInputLayer(0, "input0"); + IConnectableLayer* const weightsLayer = net->AddConstantLayer(weights, "weights"); + IConnectableLayer* const biasesLayer = net->AddConstantLayer(biases, "biases"); + IConnectableLayer* const convLayer = net->AddConvolution2dLayer(descriptor, "conv2d"); + IConnectableLayer* const outputLayer = net->AddOutputLayer(0); inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0)); weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1)); @@ -179,18 +179,18 @@ TEST_CASE("GetTosaMappingFromLayer_Conv2dLayer") TEST_CASE("GetTosaMapping_MaxPool2DLayer") { - armnn::Pooling2dDescriptor descriptor; - descriptor.m_PoolType = armnn::PoolingAlgorithm::Max; + Pooling2dDescriptor descriptor; + descriptor.m_PoolType = PoolingAlgorithm::Max; descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2; descriptor.m_StrideX = descriptor.m_StrideY = 2; descriptor.m_PadLeft = 1; descriptor.m_PadRight = 1; descriptor.m_PadTop = 1; descriptor.m_PadBottom = 1; - descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + descriptor.m_PaddingMethod = PaddingMethod::Exclude; - armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, DataType::Float32); - armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, DataType::Float32); + TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, DataType::Float32); + TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, DataType::Float32); std::vector> inputShape = {{ 1, 1, 4, 4 }}; std::vector> outputShape = {{ 1, 1, 3, 3 }}; @@ -209,30 +209,30 @@ TEST_CASE("GetTosaMappingFromLayer_MaxPool2DLayer") // Builds up the structure of the network. 
INetworkPtr net(INetwork::Create()); - armnn::Pooling2dDescriptor descriptor; - descriptor.m_PoolType = armnn::PoolingAlgorithm::Max; + Pooling2dDescriptor descriptor; + descriptor.m_PoolType = PoolingAlgorithm::Max; descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2; descriptor.m_StrideX = descriptor.m_StrideY = 2; descriptor.m_PadLeft = 1; descriptor.m_PadRight = 1; descriptor.m_PadTop = 1; descriptor.m_PadBottom = 1; - descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + descriptor.m_PaddingMethod = PaddingMethod::Exclude; - IConnectableLayer* input0 = net->AddInputLayer(0, "input0"); - IConnectableLayer* pool = net->AddPooling2dLayer(descriptor, "pool"); - IConnectableLayer* output = net->AddOutputLayer(0, "output"); + IConnectableLayer* input = net->AddInputLayer(0, "input0"); + IConnectableLayer* pool = net->AddPooling2dLayer(descriptor, "pool"); + IConnectableLayer* output = net->AddOutputLayer(0, "output"); - input0->GetOutputSlot(0).Connect(pool->GetInputSlot(0)); + input->GetOutputSlot(0).Connect(pool->GetInputSlot(0)); pool->GetOutputSlot(0).Connect(output->GetInputSlot(0)); - armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, DataType::Float32); - armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, DataType::Float32); + TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, DataType::Float32); + TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, DataType::Float32); std::vector> inputShape = {{ 1, 1, 4, 4 }}; std::vector> outputShape = {{ 1, 1, 3, 3 }}; - input0->GetOutputSlot(0).SetTensorInfo(inputTensorInfo); + input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo); pool->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); TosaSerializationBasicBlock* basicBlock = @@ -243,18 +243,18 @@ TEST_CASE("GetTosaMappingFromLayer_MaxPool2DLayer") TEST_CASE("GetTosaMapping_AvgPool2DLayer") { - armnn::Pooling2dDescriptor descriptor; - descriptor.m_PoolType = armnn::PoolingAlgorithm::Average; + Pooling2dDescriptor descriptor; + descriptor.m_PoolType = PoolingAlgorithm::Average; descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2; descriptor.m_StrideX = descriptor.m_StrideY = 2; descriptor.m_PadLeft = 1; descriptor.m_PadRight = 1; descriptor.m_PadTop = 1; descriptor.m_PadBottom = 1; - descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + descriptor.m_PaddingMethod = PaddingMethod::Exclude; - armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, DataType::Float32); - armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, DataType::Float32); + TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, DataType::Float32); + TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, DataType::Float32); std::vector> inputShape = {{ 1, 1, 4, 4 }}; std::vector> outputShape = {{ 1, 1, 3, 3 }}; @@ -278,15 +278,15 @@ TEST_CASE("GetTosaMappingFromLayer_AvgPool2DLayer") // Builds up the structure of the network. 
INetworkPtr net(INetwork::Create()); - armnn::Pooling2dDescriptor descriptor; - descriptor.m_PoolType = armnn::PoolingAlgorithm::Average; + Pooling2dDescriptor descriptor; + descriptor.m_PoolType = PoolingAlgorithm::Average; descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2; descriptor.m_StrideX = descriptor.m_StrideY = 2; descriptor.m_PadLeft = 1; descriptor.m_PadRight = 1; descriptor.m_PadTop = 1; descriptor.m_PadBottom = 1; - descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + descriptor.m_PaddingMethod = PaddingMethod::Exclude; IConnectableLayer* input0 = net->AddInputLayer(0, "input0"); IConnectableLayer* pool = net->AddPooling2dLayer(descriptor, "pool"); @@ -295,8 +295,8 @@ TEST_CASE("GetTosaMappingFromLayer_AvgPool2DLayer") input0->GetOutputSlot(0).Connect(pool->GetInputSlot(0)); pool->GetOutputSlot(0).Connect(output->GetInputSlot(0)); - armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, DataType::Float32); - armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, DataType::Float32); + TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, DataType::Float32); + TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, DataType::Float32); std::vector> inputShape = {{ 1, 1, 4, 4 }}; std::vector> outputShape = {{ 1, 1, 3, 3 }}; @@ -315,6 +315,66 @@ TEST_CASE("GetTosaMappingFromLayer_AvgPool2DLayer") LayerType::Pooling2d); } +TEST_CASE("GetTosaMapping_ReshapeLayer") +{ + TensorInfo inputInfo = TensorInfo({ 2, 3 }, DataType::Float32); + TensorInfo outputInfo = TensorInfo({ 6 }, DataType::Float32); + + std::vector> inputShape = {{ 2, 3 }}; + std::vector> outputShape = {{ 6 }}; + + ReshapeDescriptor descriptor; + descriptor.m_TargetShape = { 6 }; + + TosaSerializationBasicBlock* basicBlock = + GetTosaMapping(nullptr, LayerType::Reshape, {&inputInfo}, {&outputInfo}, descriptor); + AssertTosaOneToOneMappingBasicBlock(basicBlock, + inputShape, + outputShape, + Op_RESHAPE, + Attribute_ReshapeAttribute, + descriptor, + LayerType::Reshape); +} + +TEST_CASE("GetTosaMappingFromLayer_ReshapeLayer") +{ + IRuntime::CreationOptions options; + IRuntimePtr runtime(IRuntime::Create(options)); + + // Builds up the structure of the network. 
+ INetworkPtr net(INetwork::Create()); + + ReshapeDescriptor descriptor; + descriptor.m_TargetShape = { 6 }; + + IConnectableLayer* input = net->AddInputLayer(0, "input"); + IConnectableLayer* reshape = net->AddReshapeLayer(descriptor, "reshape"); + IConnectableLayer* output = net->AddOutputLayer(0, "output"); + + input->GetOutputSlot(0).Connect(reshape->GetInputSlot(0)); + reshape->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + + TensorInfo inputInfo = TensorInfo({ 2, 3 }, DataType::Float32); + TensorInfo outputInfo = TensorInfo({ 6 }, DataType::Float32); + + input->GetOutputSlot(0).SetTensorInfo(inputInfo); + reshape->GetOutputSlot(0).SetTensorInfo(outputInfo); + + std::vector> inputShape = {{ 2, 3 }}; + std::vector> outputShape = {{ 6 }}; + + TosaSerializationBasicBlock* basicBlock = + GetTosaMappingFromLayer(PolymorphicDowncast(reshape)); + AssertTosaOneToOneMappingBasicBlock(basicBlock, + inputShape, + outputShape, + Op_RESHAPE, + Attribute_ReshapeAttribute, + descriptor, + LayerType::Reshape); +} + TEST_CASE("GetTosaMapping_Unimplemented") { TosaSerializationBasicBlock* basicBlock = diff --git a/src/backends/tosaCommon/test/TosaTestUtils.hpp b/src/backends/tosaCommon/test/TosaTestUtils.hpp index dd63c0efdf..5c10a6d638 100644 --- a/src/backends/tosaCommon/test/TosaTestUtils.hpp +++ b/src/backends/tosaCommon/test/TosaTestUtils.hpp @@ -8,16 +8,20 @@ #include #include +#include #include +#include using namespace armnn; using namespace tosa; -inline void VerifyTosaAttributeFromDescriptor(const BaseDescriptor& descriptor, - const TosaAttributeBase* attribute, - LayerType type, - uint32_t mappingOpNumber = 0) +inline void VerifyTosaAttribute(const BaseDescriptor& descriptor, + const TosaAttributeBase* attribute, + std::vector inputShape, + std::vector outputShape, + LayerType type, + uint32_t mappingOpNumber = 0) { switch (type) { @@ -100,6 +104,25 @@ inline void VerifyTosaAttributeFromDescriptor(const BaseDescriptor& descriptor, CHECK(stride == poolAttribute.stride()); break; } + case LayerType::Reshape: + { + auto reshapeDesc = PolymorphicDowncast(&descriptor); + TosaReshapeAttribute reshapeAttribute(attribute); + std::vector shapeAttrib = reshapeAttribute.new_shape(); + + CHECK(GetTosaTensorShape(reshapeDesc->m_TargetShape) == shapeAttrib); + CHECK(outputShape == shapeAttrib); + + auto numInputElements = std::accumulate(std::begin(inputShape), + std::end(inputShape), + 1, + std::multiplies()); + auto numAttributeShapeElements = std::accumulate(std::begin(shapeAttrib), + std::end(shapeAttrib), + 1, + std::multiplies()); + CHECK(numInputElements == numAttributeShapeElements); + } default: break; } @@ -195,7 +218,22 @@ inline void AssertTosaOneToOneMappingBasicBlock(TosaSerializationBasicBlock* bas } } - VerifyTosaAttributeFromDescriptor(descriptor, - op->GetAttribute(), - type); + std::vector input = {}; + std::vector output = {}; + + if (!inputShape.empty()) + { + input = inputShape[0]; + } + + if (!outputShape.empty()) + { + output = outputShape[0]; + } + + VerifyTosaAttribute(descriptor, + op->GetAttribute(), + input, + output, + type); } \ No newline at end of file diff --git a/src/backends/tosaReference/TosaRefLayerSupport.cpp b/src/backends/tosaReference/TosaRefLayerSupport.cpp index 848b7efdce..5cda85af20 100644 --- a/src/backends/tosaReference/TosaRefLayerSupport.cpp +++ b/src/backends/tosaReference/TosaRefLayerSupport.cpp @@ -36,7 +36,7 @@ static bool RunTosaLayerChecksSingleDataType(TosaSerializationOperator* op, for (auto input : inputs) { - std::string dataTypeCode = 
std::to_string(input->GetDtype()); + std::string dataTypeCode = TosaDTypeToString(input->GetDtype()); // Check Dtype from tensor (GetDtype) supported &= CheckSupportRule(TosaTypeAnyOf(input, supportedTypes), @@ -54,7 +54,7 @@ static bool RunTosaLayerChecksSingleDataType(TosaSerializationOperator* op, for (auto output : outputs) { - std::string dataTypeCode = std::to_string(output->GetDtype()); + std::string dataTypeCode = TosaDTypeToString(output->GetDtype()); // Check Dtype from tensor (GetDtype) supported &= CheckSupportRule(TosaTypeAnyOf(output, supportedTypes), @@ -97,8 +97,8 @@ static bool RunTosaLayerChecksInputOutputDataType(TosaSerializationOperator* op, { auto input = inputs[i]; auto output = outputs[i]; - std::string inputDataTypeCode = std::to_string(input->GetDtype()); - std::string outputDataTypeCode = std::to_string(output->GetDtype()); + std::string inputDataTypeCode = TosaDTypeToString(input->GetDtype()); + std::string outputDataTypeCode = TosaDTypeToString(output->GetDtype()); std::tuple mappingType(input->GetDtype(), output->GetDtype()); // Check Dtype from tensor (GetDtype) @@ -285,6 +285,24 @@ static bool IsTosaLayerSupported(TosaSerializationOperator* op, return RunTosaLayerChecksSingleDataType( op, inputs, outputs, supportedAttributes, supportedTypes, reasonIfUnsupported); } + case tosa::Op_RESHAPE: + { + std::vector supportedAttributes = { Attribute_ReshapeAttribute }; + + std::vector supportedTypes = + { + DType_FP16, + DType_FP32, + DType_INT8, + DType_INT16, + DType_INT32, + DType_BOOL + }; + + // Check the attribute, data types and bounds for inputs and outputs. + return RunTosaLayerChecksSingleDataType( + op, inputs, outputs, supportedAttributes, supportedTypes, reasonIfUnsupported); + } default: SetValueChecked(reasonIfUnsupported, "Operation is currently unsupported by the TOSA Reference Backend."); return false; @@ -332,6 +350,7 @@ bool TosaRefLayerSupport::IsLayerSupported(const LayerType& type, break; } case LayerType::Pooling2d: + case LayerType::Reshape: // Setup inputs and outputs inputInfos.push_back(&infos[0]); outputInfos.push_back(&infos[1]); diff --git a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp index 4245f0d4c4..aaf8a678e3 100644 --- a/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp +++ b/src/backends/tosaReference/test/TosaRefEndToEndTests.cpp @@ -8,6 +8,7 @@ #include "backendsCommon/test/AdditionEndToEndTestImpl.hpp" #include "backendsCommon/test/Convolution2dEndToEndTestImpl.hpp" #include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp" +#include "backendsCommon/test/ReshapeEndToEndTestImpl.hpp" #include @@ -74,4 +75,20 @@ TEST_CASE("TosaRefAvgPool2DIgnoreValueEndtoEndTestFloat32") AvgPool2dEndToEnd(tosaDefaultBackends, PaddingMethod::IgnoreValue); } +// Reshape +TEST_CASE("TosaRefReshapeEndtoEndTestFloat32") +{ + ReshapeEndToEnd(tosaDefaultBackends); +} + +TEST_CASE("TosaRefReshapeEndtoEndTestInt32") +{ + ReshapeEndToEnd(tosaDefaultBackends); +} + +TEST_CASE("TosaRefReshapeEndtoEndTestFloat16") +{ + ReshapeEndToEndFloat16(tosaDefaultBackends); +} + } \ No newline at end of file diff --git a/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp b/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp index e6fbbf9688..86b01d8d0c 100644 --- a/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp +++ b/src/backends/tosaReference/test/TosaRefLayerSupportTests.cpp @@ -12,26 +12,28 @@ #include +using namespace armnn; + 
TEST_SUITE("TosaRefLayerSupported") { TEST_CASE("IsLayerSupportedTosaReferenceAddition") { - armnn::TensorShape shape0 = {1,1,3,4}; - armnn::TensorShape shape1 = {4}; - armnn::TensorShape outShape = {1,1,3,4}; - armnn::TensorInfo in0(shape0, armnn::DataType::Float32); - armnn::TensorInfo in1(shape1, armnn::DataType::Float32); - armnn::TensorInfo out(outShape, armnn::DataType::Float32); - - armnn::BaseDescriptor desc; - armnn::TosaRefLayerSupport supportChecker; + TensorShape shape0 = {1,1,3,4}; + TensorShape shape1 = {4}; + TensorShape outShape = {1,1,3,4}; + TensorInfo in0(shape0, DataType::Float32); + TensorInfo in1(shape1, DataType::Float32); + TensorInfo out(outShape, DataType::Float32); + + BaseDescriptor desc; + TosaRefLayerSupport supportChecker; std::string reasonIfNotSupported; - auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Addition, + auto supported = supportChecker.IsLayerSupported(LayerType::Addition, {in0, in1, out}, desc, - armnn::EmptyOptional(), - armnn::EmptyOptional(), + EmptyOptional(), + EmptyOptional(), reasonIfNotSupported); CHECK(supported); @@ -39,21 +41,21 @@ TEST_CASE("IsLayerSupportedTosaReferenceAddition") TEST_CASE("IsLayerSupportedTosaReferenceAdditionUnsupported") { - armnn::TensorShape shape0 = {1,1,3,4}; - armnn::TensorShape shape1 = {4}; - armnn::TensorShape outShape = {1,1,3,4}; - armnn::TensorInfo in0(shape0, armnn::DataType::Signed64); - armnn::TensorInfo in1(shape1, armnn::DataType::Signed64); - armnn::TensorInfo out(outShape, armnn::DataType::Signed64); - - armnn::BaseDescriptor desc; - armnn::TosaRefLayerSupport supportChecker; + TensorShape shape0 = {1,1,3,4}; + TensorShape shape1 = {4}; + TensorShape outShape = {1,1,3,4}; + TensorInfo in0(shape0, DataType::Signed64); + TensorInfo in1(shape1, DataType::Signed64); + TensorInfo out(outShape, DataType::Signed64); + + BaseDescriptor desc; + TosaRefLayerSupport supportChecker; std::string reasonIfNotSupported; - auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Addition, + auto supported = supportChecker.IsLayerSupported(LayerType::Addition, {in0, in1, out}, desc, - armnn::EmptyOptional(), - armnn::EmptyOptional(), + EmptyOptional(), + EmptyOptional(), reasonIfNotSupported); CHECK(!supported); @@ -63,19 +65,21 @@ TEST_CASE("IsLayerSupportedTosaReferenceAdditionUnsupported") "TOSA Reference Operator: Op_ADD for input: input1_") != std::string::npos); REQUIRE(reasonIfNotSupported.find( "TOSA Reference Operator: Op_ADD for output: output0_") != std::string::npos); + REQUIRE(reasonIfNotSupported.find( + "has an unsupported data type: DType_UNKNOWN") != std::string::npos); } TEST_CASE("IsLayerSupportedTosaReferenceConstant") { - armnn::TensorInfo outputInfo({1,1,3,4}, armnn::DataType::Float32); + TensorInfo outputInfo({1,1,3,4}, DataType::Float32); - armnn::TosaRefLayerSupport supportChecker; + TosaRefLayerSupport supportChecker; std::string reasonIfNotSupported; - auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Constant, + auto supported = supportChecker.IsLayerSupported(LayerType::Constant, {outputInfo}, - armnn::BaseDescriptor(), - armnn::EmptyOptional(), - armnn::EmptyOptional(), + BaseDescriptor(), + EmptyOptional(), + EmptyOptional(), reasonIfNotSupported); CHECK(supported); @@ -83,39 +87,41 @@ TEST_CASE("IsLayerSupportedTosaReferenceConstant") TEST_CASE("IsLayerSupportedTosaReferenceConstantUnsupported") { - armnn::TensorInfo outputInfo({1,1,3,4}, armnn::DataType::Signed64); + TensorInfo outputInfo({1,1,3,4}, DataType::Signed64); - 
armnn::TosaRefLayerSupport supportChecker; + TosaRefLayerSupport supportChecker; std::string reasonIfNotSupported; - auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Constant, + auto supported = supportChecker.IsLayerSupported(LayerType::Constant, {outputInfo}, - armnn::BaseDescriptor(), - armnn::EmptyOptional(), - armnn::EmptyOptional(), + BaseDescriptor(), + EmptyOptional(), + EmptyOptional(), reasonIfNotSupported); CHECK(!supported); REQUIRE(reasonIfNotSupported.find( "TOSA Reference Operator: Op_CONST for output: constant_") != std::string::npos); + REQUIRE(reasonIfNotSupported.find( + "has an unsupported data type: DType_UNKNOWN") != std::string::npos); } TEST_CASE("IsLayerSupportedTosaReferenceConv2d") { - armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32); - armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32); - armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32); - armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32); + TensorInfo inputInfo ({ 1, 5, 5, 1 }, DataType::Float32); + TensorInfo outputInfo({ 1, 3, 3, 1 }, DataType::Float32); + TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32); + TensorInfo biasesInfo ({ 1 }, DataType::Float32); - armnn::Convolution2dDescriptor desc; + Convolution2dDescriptor desc; desc.m_BiasEnabled = true; - armnn::TosaRefLayerSupport supportChecker; + TosaRefLayerSupport supportChecker; std::string reasonIfNotSupported; auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Convolution2d, {inputInfo, outputInfo, weightsInfo, biasesInfo}, desc, - armnn::EmptyOptional(), - armnn::EmptyOptional(), + EmptyOptional(), + EmptyOptional(), reasonIfNotSupported); CHECK(supported); @@ -124,21 +130,21 @@ TEST_CASE("IsLayerSupportedTosaReferenceConv2d") TEST_CASE("IsLayerSupportedTosaReferenceConv2dUnsupported") { // If inputs and weights are Fp32, output must match. 
- armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32); - armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Signed64); - armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true); - armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true); + TensorInfo inputInfo ({ 1, 5, 5, 1 }, DataType::Float32); + TensorInfo outputInfo({ 1, 3, 3, 1 }, DataType::Signed64); + TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true); + TensorInfo biasesInfo ({ 1 }, DataType::Float32, 0.0f, 0, true); - armnn::Convolution2dDescriptor desc; + Convolution2dDescriptor desc; desc.m_BiasEnabled = true; - armnn::TosaRefLayerSupport supportChecker; + TosaRefLayerSupport supportChecker; std::string reasonIfNotSupported; auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Convolution2d, {inputInfo, outputInfo, weightsInfo, biasesInfo}, desc, - armnn::EmptyOptional(), - armnn::EmptyOptional(), + EmptyOptional(), + EmptyOptional(), reasonIfNotSupported); CHECK(!supported); @@ -154,19 +160,19 @@ TEST_CASE("IsLayerSupportedTosaReferenceConv2dUnsupported") TEST_CASE("IsLayerSupportedTosaReferenceMaxPooling2d") { - armnn::TensorShape inShape = {1,1,3,4}; - armnn::TensorShape outShape = {1,1,3,4}; - armnn::TensorInfo in(inShape, armnn::DataType::Float32); - armnn::TensorInfo out(outShape, armnn::DataType::Float32); + TensorShape inShape = {1,1,3,4}; + TensorShape outShape = {1,1,3,4}; + TensorInfo in(inShape, DataType::Float32); + TensorInfo out(outShape, DataType::Float32); - armnn::Pooling2dDescriptor desc; - armnn::TosaRefLayerSupport supportChecker; + Pooling2dDescriptor desc; + TosaRefLayerSupport supportChecker; std::string reasonIfNotSupported; - auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Pooling2d, + auto supported = supportChecker.IsLayerSupported(LayerType::Pooling2d, {in, out}, desc, - armnn::EmptyOptional(), - armnn::EmptyOptional(), + EmptyOptional(), + EmptyOptional(), reasonIfNotSupported); CHECK(supported); @@ -174,22 +180,22 @@ TEST_CASE("IsLayerSupportedTosaReferenceMaxPooling2d") TEST_CASE("IsLayerSupportedTosaReferenceAvgPooling2d_IgnoreValue") { - armnn::TensorShape inShape = {1,1,3,4}; - armnn::TensorShape outShape = {1,1,3,4}; - armnn::TensorInfo in(inShape, armnn::DataType::Float32); - armnn::TensorInfo out(outShape, armnn::DataType::Float32); + TensorShape inShape = {1,1,3,4}; + TensorShape outShape = {1,1,3,4}; + TensorInfo in(inShape, DataType::Float32); + TensorInfo out(outShape, DataType::Float32); - armnn::Pooling2dDescriptor desc; - desc.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; - desc.m_PoolType = armnn::PoolingAlgorithm::Average; + Pooling2dDescriptor desc; + desc.m_PaddingMethod = PaddingMethod::IgnoreValue; + desc.m_PoolType = PoolingAlgorithm::Average; - armnn::TosaRefLayerSupport supportChecker; + TosaRefLayerSupport supportChecker; std::string reasonIfNotSupported; - auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Pooling2d, + auto supported = supportChecker.IsLayerSupported(LayerType::Pooling2d, {in, out}, desc, - armnn::EmptyOptional(), - armnn::EmptyOptional(), + EmptyOptional(), + EmptyOptional(), reasonIfNotSupported); CHECK(supported); @@ -197,22 +203,22 @@ TEST_CASE("IsLayerSupportedTosaReferenceAvgPooling2d_IgnoreValue") TEST_CASE("IsLayerSupportedTosaReferenceAvgPooling2d_InputOutputDatatypeDifferent") { - armnn::TensorShape inShape = {1,1,3,4}; - armnn::TensorShape outShape = {1,1,3,4}; - armnn::TensorInfo 
in(inShape, armnn::DataType::QAsymmS8); - armnn::TensorInfo out(outShape, armnn::DataType::Signed32); + TensorShape inShape = {1,1,3,4}; + TensorShape outShape = {1,1,3,4}; + TensorInfo in(inShape, DataType::QAsymmS8); + TensorInfo out(outShape, DataType::Signed32); - armnn::Pooling2dDescriptor desc; - desc.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; - desc.m_PoolType = armnn::PoolingAlgorithm::Average; + Pooling2dDescriptor desc; + desc.m_PaddingMethod = PaddingMethod::IgnoreValue; + desc.m_PoolType = PoolingAlgorithm::Average; - armnn::TosaRefLayerSupport supportChecker; + TosaRefLayerSupport supportChecker; std::string reasonIfNotSupported; - auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Pooling2d, + auto supported = supportChecker.IsLayerSupported(LayerType::Pooling2d, {in, out}, desc, - armnn::EmptyOptional(), - armnn::EmptyOptional(), + EmptyOptional(), + EmptyOptional(), reasonIfNotSupported); CHECK(supported); @@ -220,19 +226,19 @@ TEST_CASE("IsLayerSupportedTosaReferenceAvgPooling2d_InputOutputDatatypeDifferen TEST_CASE("IsLayerSupportedTosaReferenceMaxPooling2dUnsupported") { - armnn::TensorShape inShape = {1,1,3,4}; - armnn::TensorShape outShape = {1,1,3,4}; - armnn::TensorInfo in(inShape, armnn::DataType::Signed64); - armnn::TensorInfo out(outShape, armnn::DataType::Signed64); + TensorShape inShape = {1,1,3,4}; + TensorShape outShape = {1,1,3,4}; + TensorInfo in(inShape, DataType::Signed64); + TensorInfo out(outShape, DataType::Signed64); - armnn::Pooling2dDescriptor desc; - armnn::TosaRefLayerSupport supportChecker; + Pooling2dDescriptor desc; + TosaRefLayerSupport supportChecker; std::string reasonIfNotSupported; - auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Pooling2d, + auto supported = supportChecker.IsLayerSupported(LayerType::Pooling2d, {in, out}, desc, - armnn::EmptyOptional(), - armnn::EmptyOptional(), + EmptyOptional(), + EmptyOptional(), reasonIfNotSupported); CHECK(!supported); @@ -240,26 +246,28 @@ TEST_CASE("IsLayerSupportedTosaReferenceMaxPooling2dUnsupported") "TOSA Reference Operator: Op_MAX_POOL2D for input: input0_") != std::string::npos); REQUIRE(reasonIfNotSupported.find( "TOSA Reference Operator: Op_MAX_POOL2D for output: output0_") != std::string::npos); + REQUIRE(reasonIfNotSupported.find( + "has an unsupported data type: DType_UNKNOWN") != std::string::npos); } TEST_CASE("IsLayerSupportedTosaReferenceAvgPooling2dUnsupported_InputOutputDatatypeDifferent") { - armnn::TensorShape inShape = {1,1,3,4}; - armnn::TensorShape outShape = {1,1,3,4}; - armnn::TensorInfo in(inShape, armnn::DataType::Float32); - armnn::TensorInfo out(outShape, armnn::DataType::Float16); + TensorShape inShape = {1,1,3,4}; + TensorShape outShape = {1,1,3,4}; + TensorInfo in(inShape, DataType::Float32); + TensorInfo out(outShape, DataType::Float16); - armnn::Pooling2dDescriptor desc; - desc.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; - desc.m_PoolType = armnn::PoolingAlgorithm::Average; + Pooling2dDescriptor desc; + desc.m_PaddingMethod = PaddingMethod::IgnoreValue; + desc.m_PoolType = PoolingAlgorithm::Average; - armnn::TosaRefLayerSupport supportChecker; + TosaRefLayerSupport supportChecker; std::string reasonIfNotSupported; - auto supported = supportChecker.IsLayerSupported(armnn::LayerType::Pooling2d, + auto supported = supportChecker.IsLayerSupported(LayerType::Pooling2d, {in, out}, desc, - armnn::EmptyOptional(), - armnn::EmptyOptional(), + EmptyOptional(), + EmptyOptional(), reasonIfNotSupported); CHECK(!supported); @@ 
-268,7 +276,57 @@ TEST_CASE("IsLayerSupportedTosaReferenceAvgPooling2dUnsupported_InputOutputDatat REQUIRE(reasonIfNotSupported.find( " and output: output0_") != std::string::npos); REQUIRE(reasonIfNotSupported.find( - " has an unsupported input data type: 8 to output data type: 10") != std::string::npos); + " has an unsupported input data type: DType_FP32 to output data type: DType_FP16") != std::string::npos); +} + +TEST_CASE("IsLayerSupportedTosaReferenceReshape") +{ + TensorShape inShape = {3,4}; + TensorShape outShape = {12}; + TensorInfo in(inShape, DataType::Float32); + TensorInfo out(outShape, DataType::Float32); + + ReshapeDescriptor desc; + desc.m_TargetShape = {12}; + + TosaRefLayerSupport supportChecker; + std::string reasonIfNotSupported; + auto supported = supportChecker.IsLayerSupported(LayerType::Reshape, + {in, out}, + desc, + EmptyOptional(), + EmptyOptional(), + reasonIfNotSupported); + + CHECK(supported); +} + +TEST_CASE("IsLayerSupportedTosaReferenceReshapeUnsupported") +{ + TensorShape inShape = {3,4}; + TensorShape outShape = {12}; + TensorInfo in(inShape, DataType::Signed64); + TensorInfo out(outShape, DataType::Signed64); + + ReshapeDescriptor desc; + desc.m_TargetShape = {12}; + + TosaRefLayerSupport supportChecker; + std::string reasonIfNotSupported; + auto supported = supportChecker.IsLayerSupported(LayerType::Reshape, + {in, out}, + desc, + EmptyOptional(), + EmptyOptional(), + reasonIfNotSupported); + + CHECK(!supported); + REQUIRE(reasonIfNotSupported.find( + "TOSA Reference Operator: Op_RESHAPE for input: input0_") != std::string::npos); + REQUIRE(reasonIfNotSupported.find( + "TOSA Reference Operator: Op_RESHAPE for output: output0_") != std::string::npos); + REQUIRE(reasonIfNotSupported.find( + "has an unsupported data type: DType_UNKNOWN") != std::string::npos); } } -- cgit v1.2.1
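
For reference, the new mapping can be exercised on its own, mirroring the GetTosaMapping_ReshapeLayer test added above. The sketch below is illustrative only and not part of the patch: the include paths are assumptions based on the directory layout shown here, and the backend normally reaches this code through GetTosaMappingFromLayer on a connected graph, as the second Reshape test demonstrates.

// Minimal sketch (not part of the patch): lower a standalone ArmNN Reshape
// descriptor to a TOSA basic block, mirroring the GetTosaMapping_ReshapeLayer
// test above. Include paths are assumptions based on this patch's file layout.
#include <TosaMappings.hpp>        // GetTosaMapping (src/backends/tosaCommon), assumed path

#include <armnn/Descriptors.hpp>   // ReshapeDescriptor
#include <armnn/Tensor.hpp>        // TensorInfo
#include <armnn/Types.hpp>         // LayerType, DataType

using namespace armnn;

TosaSerializationBasicBlock* LowerStandaloneReshape()
{
    // A 2x3 Float32 tensor flattened to 6 elements, as in the new tests.
    TensorInfo inputInfo({ 2, 3 }, DataType::Float32);
    TensorInfo outputInfo({ 6 }, DataType::Float32);

    ReshapeDescriptor descriptor;
    descriptor.m_TargetShape = { 6 };

    // Passing nullptr for the layer takes the validation path described in
    // ReshapeOperator.cpp: placeholder input0_/output0_ tensor names are used.
    TosaSerializationBasicBlock* block =
        GetTosaMapping(nullptr, LayerType::Reshape, { &inputInfo }, { &outputInfo }, descriptor);

    // The returned block contains a single Op_RESHAPE operator whose
    // ReshapeAttribute carries the target shape, plus one input tensor and
    // one output tensor.
    return block;
}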