From 2e5d0b2e2a212ceb803681b717cbaf821f5e0929 Mon Sep 17 00:00:00 2001 From: Matthew Sloyan Date: Thu, 21 Oct 2021 14:05:31 +0100 Subject: IVGCVSW-6469 Add MirrorPad FrontEnd and Ref Support * Added PaddingMode enum to PaddingDescriptor to enable Symmetric and Reflect padding. * Added Symmetric and Reflect Ref implementation. * Added Serializer & Deserializer support. * Added unit tests. Signed-off-by: Matthew Sloyan Change-Id: I4bed907b31742b32ccefe5e8ca39a6f1e5bd9dee --- docs/01_02_deserializer_serializer.dox | 2 +- include/armnn/Descriptors.hpp | 12 +- include/armnn/Types.hpp | 11 + include/armnn/TypesUtils.hpp | 11 + src/armnn/SerializeLayerParameters.cpp | 1 + src/armnn/layers/PadLayer.cpp | 2 + src/armnnDeserializer/Deserializer.cpp | 16 +- src/armnnDeserializer/test/DeserializePad.cpp | 119 ++- src/armnnSerializer/ArmnnSchema.fbs | 7 + src/armnnSerializer/ArmnnSchema_generated.h | 53 +- src/armnnSerializer/Serializer.cpp | 3 +- src/armnnSerializer/SerializerUtils.cpp | 13 + src/armnnSerializer/SerializerUtils.hpp | 2 + src/armnnSerializer/test/SerializerTests.cpp | 30 + src/backends/aclCommon/ArmComputeTensorUtils.cpp | 6 +- src/backends/aclCommon/ArmComputeTensorUtils.hpp | 4 +- src/backends/aclCommon/ArmComputeUtils.hpp | 11 + src/backends/backendsCommon/common.mk | 1 + src/backends/backendsCommon/test/CMakeLists.txt | 2 + src/backends/backendsCommon/test/LayerTests.hpp | 1 + .../test/layerTests/MirrorPadTestImpl.cpp | 1091 ++++++++++++++++++++ .../test/layerTests/MirrorPadTestImpl.hpp | 117 +++ src/backends/cl/workloads/ClFillWorkload.cpp | 2 +- src/backends/cl/workloads/ClPadWorkload.cpp | 2 +- src/backends/neon/workloads/NeonFillWorkload.cpp | 2 +- src/backends/neon/workloads/NeonPadWorkload.cpp | 2 +- src/backends/reference/backend.mk | 1 + src/backends/reference/test/RefLayerTests.cpp | 27 +- src/backends/reference/workloads/CMakeLists.txt | 2 + src/backends/reference/workloads/MirrorPad.cpp | 199 ++++ src/backends/reference/workloads/MirrorPad.hpp | 
22 + .../reference/workloads/RefPadWorkload.cpp | 19 +- 32 files changed, 1752 insertions(+), 41 deletions(-) create mode 100644 src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp create mode 100644 src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp create mode 100644 src/backends/reference/workloads/MirrorPad.cpp create mode 100644 src/backends/reference/workloads/MirrorPad.hpp diff --git a/docs/01_02_deserializer_serializer.dox b/docs/01_02_deserializer_serializer.dox index 6bd0d4a1a2..5d4dc43a74 100644 --- a/docs/01_02_deserializer_serializer.dox +++ b/docs/01_02_deserializer_serializer.dox @@ -54,7 +54,7 @@ The Arm NN SDK Serializer currently supports the following layers: - Multiplication - Normalization - Output -- Pad +- Pad (Constant, Symmetric, Reflect) - Permute - Pooling2d - Prelu diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp index 39ea824045..a8ad12ff8f 100644 --- a/include/armnn/Descriptors.hpp +++ b/include/armnn/Descriptors.hpp @@ -1060,17 +1060,20 @@ struct MeanDescriptor : BaseDescriptor /// A PadDescriptor for the PadLayer. struct PadDescriptor : BaseDescriptor { - PadDescriptor() : m_PadValue(0) + PadDescriptor() : m_PadValue(0), m_PaddingMode(PaddingMode::Constant) {} - PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList, const float& padValue = 0) + PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList, + const float& padValue = 0, + const PaddingMode& paddingMode = PaddingMode::Constant) : m_PadList(padList) , m_PadValue(padValue) , m_PaddingMode(paddingMode) {} bool operator ==(const PadDescriptor& rhs) const { - return m_PadList == rhs.m_PadList && m_PadValue == rhs.m_PadValue; + return m_PadList == rhs.m_PadList && m_PadValue == rhs.m_PadValue && m_PaddingMode == rhs.m_PaddingMode; } /// @brief Specifies the padding for input dimension. 
@@ -1081,6 +1084,9 @@ struct PadDescriptor : BaseDescriptor /// Optional value to use for padding, defaults to 0 float m_PadValue; + + /// Specifies the Padding mode (Constant, Reflect or Symmetric) + PaddingMode m_PaddingMode; }; /// A SliceDescriptor for the SliceLayer. diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp index 4f39ebe16a..deaa0b3a50 100644 --- a/include/armnn/Types.hpp +++ b/include/armnn/Types.hpp @@ -166,6 +166,17 @@ enum class PaddingMethod Exclude = 1 }; +/// +/// The padding mode controls whether the padding should be filled with constant values (Constant), or +/// reflect the input, either including the border values (Symmetric) or not (Reflect). +/// +enum class PaddingMode +{ + Constant = 0, + Reflect = 1, + Symmetric = 2 +}; + enum class NormalizationAlgorithmChannel { Across = 0, diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp index a1c11b74df..ccb0280457 100644 --- a/include/armnn/TypesUtils.hpp +++ b/include/armnn/TypesUtils.hpp @@ -125,6 +125,17 @@ constexpr char const* GetPaddingMethodAsCString(PaddingMethod method) } } +constexpr char const* GetPaddingModeAsCString(PaddingMode mode) +{ + switch (mode) + { + case PaddingMode::Constant: return "Constant"; + case PaddingMode::Symmetric: return "Symmetric"; + case PaddingMode::Reflect: return "Reflect"; + default: return "Unknown"; + } +} + constexpr char const* GetReduceOperationAsCString(ReduceOperation reduce_operation) { switch (reduce_operation) diff --git a/src/armnn/SerializeLayerParameters.cpp b/src/armnn/SerializeLayerParameters.cpp index 3fc93df727..c60d4faf79 100644 --- a/src/armnn/SerializeLayerParameters.cpp +++ b/src/armnn/SerializeLayerParameters.cpp @@ -293,6 +293,7 @@ void StringifyLayerParameters<PadDescriptor>::Serialize(ParameterStringifyFuncti fn("PadList", ss.str()); } fn("PadValue", std::to_string(desc.m_PadValue)); + fn("PaddingMode", GetPaddingModeAsCString(desc.m_PaddingMode)); } void 
StringifyLayerParameters::Serialize(ParameterStringifyFunction& fn, diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp index 78af9d3c47..bbe92af912 100644 --- a/src/armnn/layers/PadLayer.cpp +++ b/src/armnn/layers/PadLayer.cpp @@ -23,6 +23,7 @@ std::unique_ptr PadLayer::CreateWorkload(const armnn::IWorkloadFactor { PadQueueDescriptor descriptor; descriptor.m_Parameters.m_PadList = m_Param.m_PadList; + descriptor.m_Parameters.m_PaddingMode = m_Param.m_PaddingMode; SetAdditionalInfo(descriptor); return factory.CreatePad(descriptor, PrepInfoAndDesc(descriptor)); @@ -33,6 +34,7 @@ PadLayer* PadLayer::Clone(Graph& graph) const auto layer = CloneBase(graph, m_Param, GetName()); layer->m_Param.m_PadList = m_Param.m_PadList; + layer->m_Param.m_PaddingMode = m_Param.m_PaddingMode; return std::move(layer); } diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp index c088ef7b54..bfd4f6b560 100644 --- a/src/armnnDeserializer/Deserializer.cpp +++ b/src/armnnDeserializer/Deserializer.cpp @@ -577,6 +577,19 @@ armnn::UnaryOperation ToUnaryOperation(armnnSerializer::UnaryOperation operation } } +armnn::PaddingMode ToPaddingMode(armnnSerializer::PaddingMode paddingMode) +{ + switch (paddingMode) + { + case armnnSerializer::PaddingMode::PaddingMode_Reflect: + return armnn::PaddingMode::Reflect; + case armnnSerializer::PaddingMode::PaddingMode_Symmetric: + return armnn::PaddingMode::Symmetric; + default: + return armnn::PaddingMode::Constant; + } +} + armnn::ResizeMethod ToResizeMethod(armnnSerializer::ResizeMethod method) { switch (method) @@ -2064,6 +2077,7 @@ void IDeserializer::DeserializerImpl::ParsePad(GraphPtr graph, unsigned int laye auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_PadLayer()->descriptor(); auto flatBufferPadList = flatBufferDescriptor->padList(); + auto paddingMode = flatBufferDescriptor->paddingMode(); float padValue = flatBufferDescriptor->padValue(); if 
(flatBufferPadList->Length() % 2 != 0) @@ -2079,7 +2093,7 @@ void IDeserializer::DeserializerImpl::ParsePad(GraphPtr graph, unsigned int laye padList.emplace_back(flatBufferPadList->Get(i), flatBufferPadList->Get(i+1)); } - armnn::PadDescriptor descriptor(padList, padValue); + armnn::PadDescriptor descriptor(padList, padValue, ToPaddingMode(paddingMode)); auto layerName = GetLayerName(graph, layerIndex); IConnectableLayer* layer = m_Network->AddPadLayer(descriptor, layerName.c_str()); diff --git a/src/armnnDeserializer/test/DeserializePad.cpp b/src/armnnDeserializer/test/DeserializePad.cpp index 43de22912f..ade097483c 100644 --- a/src/armnnDeserializer/test/DeserializePad.cpp +++ b/src/armnnDeserializer/test/DeserializePad.cpp @@ -12,10 +12,11 @@ TEST_SUITE("Deserializer_Pad") { struct PadFixture : public ParserFlatbuffersSerializeFixture { - explicit PadFixture(const std::string &inputShape, - const std::string &padList, - const std::string &outputShape, - const std::string &dataType) + explicit PadFixture(const std::string& inputShape, + const std::string& padList, + const std::string& outputShape, + const std::string& dataType, + const std::string& paddingMode) { m_JsonString = R"( { @@ -67,6 +68,7 @@ struct PadFixture : public ParserFlatbuffersSerializeFixture }, descriptor: { padList: )" + padList + R"(, + paddingMode: )" + paddingMode + R"(, } } }, @@ -106,23 +108,108 @@ struct SimplePadFixture : PadFixture SimplePadFixture() : PadFixture("[ 2, 2, 2 ]", "[ 0, 1, 2, 1, 2, 2 ]", "[ 3, 5, 6 ]", - "QuantisedAsymm8") {} + "QuantisedAsymm8", + "Constant") {} }; TEST_CASE_FIXTURE(SimplePadFixture, "SimplePadQuantisedAsymm8") { RunTest<3, armnn::DataType::QAsymmU8>(0, - { - 0, 4, 2, 5, 6, 1, 5, 2 - }, - { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 4, 0, 0, 0, 0, 2, 5, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, - 1, 0, 0, 0, 0, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0 - }); + { + 0, 4, 2, 5, 6, 1, 5, 2 + }, + { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 4, 0, 0, 0, 0, 2, 5, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, + 1, 0, 0, 0, 0, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }); +} + +struct SimplePadSymmetricFixture : PadFixture +{ + SimplePadSymmetricFixture() : PadFixture("[ 2, 2, 2 ]", + "[ 1, 1, 1, 1, 1, 1 ]", + "[ 4, 4, 4 ]", + "QuantisedAsymm8", + "Symmetric") {} +}; + +TEST_CASE_FIXTURE(SimplePadSymmetricFixture, "SimplePadSymmetricQuantisedAsymm8") +{ + RunTest<3, armnn::DataType::QAsymmU8>(0, + { + 1, 2, + 3, 4, + + 5, 6, + 7, 8 + }, + { + 1, 1, 2, 2, + 1, 1, 2, 2, + 3, 3, 4, 4, + 3, 3, 4, 4, + + 1, 1, 2, 2, + 1, 1, 2, 2, + 3, 3, 4, 4, + 3, 3, 4, 4, + + 5, 5, 6, 6, + 5, 5, 6, 6, + 7, 7, 8, 8, + 7, 7, 8, 8, + + 5, 5, 6, 6, + 5, 5, 6, 6, + 7, 7, 8, 8, + 7, 7, 8, 8 + }); +} + +struct SimplePadReflectFixture : PadFixture +{ + SimplePadReflectFixture() : PadFixture("[ 2, 2, 2 ]", + "[ 1, 1, 1, 1, 1, 1 ]", + "[ 4, 4, 4 ]", + "QuantisedAsymm8", + "Reflect") {} +}; + +TEST_CASE_FIXTURE(SimplePadReflectFixture, "SimplePadReflectQuantisedAsymm8") +{ + RunTest<3, armnn::DataType::QAsymmU8>(0, + { + 1, 2, + 3, 4, + + 5, 6, + 7, 8 + }, + { + 8, 7, 8, 7, + 6, 5, 6, 5, + 8, 7, 8, 7, + 6, 5, 6, 5, + + 4, 3, 4, 3, + 2, 1, 2, 1, + 4, 3, 4, 3, + 2, 1, 2, 1, + + 8, 7, 8, 7, + 6, 5, 6, 5, + 8, 7, 8, 7, + 6, 5, 6, 5, + + 4, 3, 4, 3, + 2, 1, 2, 1, + 4, 3, 4, 3, + 2, 1, 2, 1 + }); } } diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs index c577a11a52..40de3496b0 100644 --- a/src/armnnSerializer/ArmnnSchema.fbs +++ b/src/armnnSerializer/ArmnnSchema.fbs @@ -619,9 +619,16 @@ table PadLayer { descriptor:PadDescriptor; } +enum PaddingMode : byte { + Constant = 0, + Reflect = 1, + Symmetric = 2 +} + table PadDescriptor { padList:[uint]; padValue:float = 0; + paddingMode:PaddingMode = 
Constant; } /// @deprecated Use ElementwiseUnaryLayer instead diff --git a/src/armnnSerializer/ArmnnSchema_generated.h b/src/armnnSerializer/ArmnnSchema_generated.h index 712ad28574..7747f9edd9 100644 --- a/src/armnnSerializer/ArmnnSchema_generated.h +++ b/src/armnnSerializer/ArmnnSchema_generated.h @@ -1198,6 +1198,39 @@ inline const char *EnumNameNormalizationAlgorithmMethod(NormalizationAlgorithmMe return EnumNamesNormalizationAlgorithmMethod()[index]; } +enum PaddingMode { + PaddingMode_Constant = 0, + PaddingMode_Reflect = 1, + PaddingMode_Symmetric = 2, + PaddingMode_MIN = PaddingMode_Constant, + PaddingMode_MAX = PaddingMode_Symmetric +}; + +inline const PaddingMode (&EnumValuesPaddingMode())[3] { + static const PaddingMode values[] = { + PaddingMode_Constant, + PaddingMode_Reflect, + PaddingMode_Symmetric + }; + return values; +} + +inline const char * const *EnumNamesPaddingMode() { + static const char * const names[4] = { + "Constant", + "Reflect", + "Symmetric", + nullptr + }; + return names; +} + +inline const char *EnumNamePaddingMode(PaddingMode e) { + if (flatbuffers::IsOutRange(e, PaddingMode_Constant, PaddingMode_Symmetric)) return ""; + const size_t index = static_cast(e); + return EnumNamesPaddingMode()[index]; +} + enum Layer { Layer_NONE = 0, Layer_ActivationLayer = 1, @@ -6383,7 +6416,8 @@ struct PadDescriptor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef PadDescriptorBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_PADLIST = 4, - VT_PADVALUE = 6 + VT_PADVALUE = 6, + VT_PADDINGMODE = 8 }; const flatbuffers::Vector *padList() const { return GetPointer *>(VT_PADLIST); @@ -6391,11 +6425,15 @@ struct PadDescriptor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { float padValue() const { return GetField(VT_PADVALUE, 0.0f); } + armnnSerializer::PaddingMode paddingMode() const { + return static_cast(GetField(VT_PADDINGMODE, 0)); + } bool Verify(flatbuffers::Verifier &verifier) const { return 
VerifyTableStart(verifier) && VerifyOffset(verifier, VT_PADLIST) && verifier.VerifyVector(padList()) && VerifyField(verifier, VT_PADVALUE) && + VerifyField(verifier, VT_PADDINGMODE) && verifier.EndTable(); } }; @@ -6410,6 +6448,9 @@ struct PadDescriptorBuilder { void add_padValue(float padValue) { fbb_.AddElement(PadDescriptor::VT_PADVALUE, padValue, 0.0f); } + void add_paddingMode(armnnSerializer::PaddingMode paddingMode) { + fbb_.AddElement(PadDescriptor::VT_PADDINGMODE, static_cast(paddingMode), 0); + } explicit PadDescriptorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); @@ -6425,22 +6466,26 @@ struct PadDescriptorBuilder { inline flatbuffers::Offset CreatePadDescriptor( flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset> padList = 0, - float padValue = 0.0f) { + float padValue = 0.0f, + armnnSerializer::PaddingMode paddingMode = armnnSerializer::PaddingMode_Constant) { PadDescriptorBuilder builder_(_fbb); builder_.add_padValue(padValue); builder_.add_padList(padList); + builder_.add_paddingMode(paddingMode); return builder_.Finish(); } inline flatbuffers::Offset CreatePadDescriptorDirect( flatbuffers::FlatBufferBuilder &_fbb, const std::vector *padList = nullptr, - float padValue = 0.0f) { + float padValue = 0.0f, + armnnSerializer::PaddingMode paddingMode = armnnSerializer::PaddingMode_Constant) { auto padList__ = padList ? 
_fbb.CreateVector(*padList) : 0; return armnnSerializer::CreatePadDescriptor( _fbb, padList__, - padValue); + padValue, + paddingMode); } /// @deprecated Use ElementwiseUnaryLayer instead diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp index 84a9d53b69..c08784352d 100644 --- a/src/armnnSerializer/Serializer.cpp +++ b/src/armnnSerializer/Serializer.cpp @@ -894,7 +894,8 @@ void SerializerStrategy::SerializePadLayer(const armnn::IConnectableLayer* layer auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder, m_flatBufferBuilder.CreateVector(padList), - padDescriptor.m_PadValue); + padDescriptor.m_PadValue, + GetFlatBufferPaddingMode(padDescriptor.m_PaddingMode)); auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder, flatBufferBaseLayer, diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp index 5ad27715c4..49ce7217dc 100644 --- a/src/armnnSerializer/SerializerUtils.cpp +++ b/src/armnnSerializer/SerializerUtils.cpp @@ -170,6 +170,19 @@ armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod p } } +armnnSerializer::PaddingMode GetFlatBufferPaddingMode(armnn::PaddingMode paddingMode) +{ + switch (paddingMode) + { + case armnn::PaddingMode::Reflect: + return armnnSerializer::PaddingMode::PaddingMode_Reflect; + case armnn::PaddingMode::Symmetric: + return armnnSerializer::PaddingMode::PaddingMode_Symmetric; + default: + return armnnSerializer::PaddingMode::PaddingMode_Constant; + } +} + armnnSerializer::NormalizationAlgorithmChannel GetFlatBufferNormalizationAlgorithmChannel( armnn::NormalizationAlgorithmChannel normalizationAlgorithmChannel) { diff --git a/src/armnnSerializer/SerializerUtils.hpp b/src/armnnSerializer/SerializerUtils.hpp index 55179864e8..07cdc2a491 100644 --- a/src/armnnSerializer/SerializerUtils.hpp +++ b/src/armnnSerializer/SerializerUtils.hpp @@ -27,6 +27,8 @@ armnnSerializer::OutputShapeRounding 
GetFlatBufferOutputShapeRounding( armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod); +armnnSerializer::PaddingMode GetFlatBufferPaddingMode(armnn::PaddingMode paddingMode); + armnnSerializer::NormalizationAlgorithmChannel GetFlatBufferNormalizationAlgorithmChannel( armnn::NormalizationAlgorithmChannel normalizationAlgorithmChannel); diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp index 2bffe0b9fd..e32b90837d 100644 --- a/src/armnnSerializer/test/SerializerTests.cpp +++ b/src/armnnSerializer/test/SerializerTests.cpp @@ -1684,6 +1684,36 @@ TEST_CASE("SerializePad") deserializedNetwork->ExecuteStrategy(verifier); } +TEST_CASE("SerializePadReflect") +{ + const std::string layerName("padReflect"); + const armnn::TensorInfo inputTensorInfo = armnn::TensorInfo({1, 2, 3, 4}, armnn::DataType::Float32); + const armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 5, 7}, armnn::DataType::Float32); + + armnn::PadDescriptor desc({{0, 0}, {1, 0}, {1, 1}, {1, 2}}); + desc.m_PaddingMode = armnn::PaddingMode::Reflect; + + armnn::INetworkPtr network = armnn::INetwork::Create(); + armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0); + armnn::IConnectableLayer* const padLayer = network->AddPadLayer(desc, layerName.c_str()); + armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0); + + inputLayer->GetOutputSlot(0).Connect(padLayer->GetInputSlot(0)); + padLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + + inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo); + padLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); + + armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network)); + CHECK(deserializedNetwork); + + LayerVerifierBaseWithDescriptor verifier(layerName, + {inputTensorInfo}, + {outputTensorInfo}, + desc); + deserializedNetwork->ExecuteStrategy(verifier); +} + 
TEST_CASE("EnsurePadBackwardCompatibility") { // The PadDescriptor is being extended with a float PadValue (so a value other than 0 diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp index 62f3263a0c..8bbaea71b3 100644 --- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp +++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp @@ -254,9 +254,9 @@ arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsign return arm_compute::Size2D(width, height); } -arm_compute::PixelValue GetPixelValue(arm_compute::ITensor& input, float pixelValue) +arm_compute::PixelValue GetPixelValue(const arm_compute::ITensorInfo* tensorInfo, float pixelValue) { - switch (input.info()->data_type()) + switch (tensorInfo->data_type()) { case arm_compute::DataType::F16: return arm_compute::PixelValue(static_cast(pixelValue)); @@ -273,7 +273,7 @@ arm_compute::PixelValue GetPixelValue(arm_compute::ITensor& input, float pixelVa return arm_compute::PixelValue(static_cast(pixelValue)); default: throw InvalidArgumentException("Unsupported DataType: [" + - std::to_string(static_cast(input.info()->data_type())) + "]"); + std::to_string(static_cast(tensorInfo->data_type())) + "]"); } } diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.hpp b/src/backends/aclCommon/ArmComputeTensorUtils.hpp index ad5d4614fe..30df31b79d 100644 --- a/src/backends/aclCommon/ArmComputeTensorUtils.hpp +++ b/src/backends/aclCommon/ArmComputeTensorUtils.hpp @@ -65,8 +65,8 @@ arm_compute::PermutationVector BuildArmComputeTransposeVector(const armnn::Permu /// Utility function used to setup an arm_compute::Size2D object from width and height values. 
arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsigned int height); -/// Gets the appropriate PixelValue for the input DataType -arm_compute::PixelValue GetPixelValue(arm_compute::ITensor& input, float pixelValue); +/// Gets the appropriate PixelValue for the TensorInfo DataType +arm_compute::PixelValue GetPixelValue(const arm_compute::ITensorInfo* tensorInfo, float pixelValue); /// Utility function used to setup an arm_compute::PadStrideInfo object from an armnn layer descriptor. template diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp index 2f767891a1..f096346c38 100644 --- a/src/backends/aclCommon/ArmComputeUtils.hpp +++ b/src/backends/aclCommon/ArmComputeUtils.hpp @@ -300,6 +300,17 @@ inline arm_compute::Conv3dInfo ComputeConv3DInfo(const armnn::Convolution3dQueue return arm_compute::Conv3dInfo{stride, padding, activationInfo, dilation, roundType, isFastMathEnabled}; } +inline arm_compute::PaddingMode ConvertPaddingModeToAcl(const PaddingMode& paddingMode) +{ + switch (paddingMode) + { + case PaddingMode::Constant: return arm_compute::PaddingMode::CONSTANT; + case PaddingMode::Reflect: return arm_compute::PaddingMode::REFLECT; + case PaddingMode::Symmetric: return arm_compute::PaddingMode::SYMMETRIC; + default: throw InvalidArgumentException("Unsupported Padding Mode"); + } +} + inline arm_compute::ReductionOperation ConvertReductionOperationToAcl(const ReduceDescriptor& descriptor) { switch (descriptor.m_ReduceOperation) diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk index f90a7c855e..a77ec06035 100644 --- a/src/backends/backendsCommon/common.mk +++ b/src/backends/backendsCommon/common.mk @@ -77,6 +77,7 @@ COMMON_TEST_SOURCES := \ test/layerTests/LstmTestImpl.cpp \ test/layerTests/MaximumTestImpl.cpp \ test/layerTests/MinimumTestImpl.cpp \ + test/layerTests/MirrorPadTestImpl.cpp \ test/layerTests/MultiplicationTestImpl.cpp \ 
test/layerTests/NegTestImpl.cpp \ test/layerTests/NormalizationTestImpl.cpp \ diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt index 9272ae749c..cd62242421 100644 --- a/src/backends/backendsCommon/test/CMakeLists.txt +++ b/src/backends/backendsCommon/test/CMakeLists.txt @@ -129,6 +129,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources layerTests/MeanTestImpl.hpp layerTests/MinimumTestImpl.cpp layerTests/MinimumTestImpl.hpp + layerTests/MirrorPadTestImpl.cpp + layerTests/MirrorPadTestImpl.hpp layerTests/MultiplicationTestImpl.cpp layerTests/MultiplicationTestImpl.hpp layerTests/NegTestImpl.cpp diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index 0dcd3d1564..b51ff3357f 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp new file mode 100644 index 0000000000..61899db00e --- /dev/null +++ b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp @@ -0,0 +1,1091 @@ +// +// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "MirrorPadTestImpl.hpp" + +#include + +#include +#include + +#include + +// +// Implementation templates +// + +template +LayerTestResult MirrorPad2dTestCommon( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + const armnn::TensorInfo& inputTensorInfo, + const armnn::TensorInfo& outputTensorInfo, + const std::vector& inputValues, + const std::vector& expectedOutputValues, + const std::vector>& padList, + const armnn::PaddingMode paddingMode) +{ + IgnoreUnused(memoryManager); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + + std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); + + armnn::PadQueueDescriptor descriptor; + + descriptor.m_Parameters.m_PadList = padList; + descriptor.m_Parameters.m_PaddingMode = paddingMode; + armnn::WorkloadInfo info; + + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr workload = workloadFactory.CreatePad(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), inputValues.data()); + + ExecuteWorkload(*workload, memoryManager); + + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + return LayerTestResult(actualOutput, + expectedOutputValues, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); +} + +template +LayerTestResult MirrorPad3dTestCommon( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + const armnn::TensorInfo& inputTensorInfo, + const armnn::TensorInfo& outputTensorInfo, + 
const std::vector& inputValues, + const std::vector& expectedOutputValues, + const std::vector>& padList, + const armnn::PaddingMode paddingMode) +{ + IgnoreUnused(memoryManager); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + + std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); + + armnn::PadQueueDescriptor descriptor; + descriptor.m_Parameters.m_PadList = padList; + descriptor.m_Parameters.m_PaddingMode = paddingMode; + + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr workload = workloadFactory.CreatePad(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), inputValues.data()); + + ExecuteWorkload(*workload, memoryManager); + + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + return LayerTestResult(actualOutput, + expectedOutputValues, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); +} + +template +LayerTestResult MirrorPad4dTestCommon( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + const armnn::TensorInfo& inputTensorInfo, + const armnn::TensorInfo& outputTensorInfo, + const std::vector& inputValues, + const std::vector& expectedOutputValues, + const std::vector>& padList, + const armnn::PaddingMode paddingMode) +{ + IgnoreUnused(memoryManager); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + + std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); + + armnn::PadQueueDescriptor descriptor; + 
descriptor.m_Parameters.m_PadList = padList; + descriptor.m_Parameters.m_PaddingMode = paddingMode; + + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr workload = workloadFactory.CreatePad(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), inputValues.data()); + + ExecuteWorkload(*workload, memoryManager); + + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + return LayerTestResult(actualOutput, + expectedOutputValues, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); +} + +template> +LayerTestResult PadSymmetric2dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + float qScale, + int32_t qOffset) +{ + const armnn::TensorShape inputShape{ 3, 3 }; + const armnn::TensorShape outputShape{ 7, 7 }; + + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); + + std::vector inputValues = armnnUtils::QuantizedVector( + { + // Height (3) x Width (3) + 1, 2, 3, + 4, 5, 6, + 7, 8, 9 + }, + qScale, qOffset); + + std::vector expectedOutputValues = armnnUtils::QuantizedVector( + { + 5, 4, 4, 5, 6, 6, 5, + 2, 1, 1, 2, 3, 3, 2, + 2, 1, 1, 2, 3, 3, 2, + 5, 4, 4, 5, 6, 6, 5, + 8, 7, 7, 8, 9, 9, 8, + 8, 7, 7, 8, 9, 9, 8, + 5, 4, 4, 5, 6, 6, 5 + }, + qScale, qOffset); + + std::vector> padList; + padList.push_back(std::pair(2,2)); + padList.push_back(std::pair(2,2)); + + return MirrorPad2dTestCommon(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + inputValues, + expectedOutputValues, + padList, + armnn::PaddingMode::Symmetric); +} + +template> 
+LayerTestResult PadReflect2dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + float qScale, + int32_t qOffset) +{ + const armnn::TensorShape inputShape{ 3, 3 }; + const armnn::TensorShape outputShape{ 7, 7 }; + + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); + + std::vector inputValues = armnnUtils::QuantizedVector( + { + // Height (3) x Width (3) + 1, 2, 3, + 4, 5, 6, + 7, 8, 9 + }, + qScale, qOffset); + + std::vector expectedOutputValues = armnnUtils::QuantizedVector( + { + 9, 8, 7, 8, 9, 8, 7, + 6, 5, 4, 5, 6, 5, 4, + 3, 2, 1, 2, 3, 2, 1, + 6, 5, 4, 5, 6, 5, 4, + 9, 8, 7, 8, 9, 8, 7, + 6, 5, 4, 5, 6, 5, 4, + 3, 2, 1, 2, 3, 2, 1 + }, + qScale, qOffset); + + std::vector> padList; + padList.push_back(std::pair(2,2)); + padList.push_back(std::pair(2,2)); + + return MirrorPad2dTestCommon(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + inputValues, + expectedOutputValues, + padList, + armnn::PaddingMode::Reflect); +} + +template> +LayerTestResult PadSymmetric3dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + float qScale, + int32_t qOffset) +{ + const armnn::TensorShape inputShape{ 2, 2, 2 }; + const armnn::TensorShape outputShape{ 4, 4, 4 }; + + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); + + std::vector inputValues = armnnUtils::QuantizedVector( + { + // Channel 0, Height (2) x Width (2) + 1, 2, + 3, 4, + + // Channel 1, Height (2) x Width (2) + 5, 6, + 7, 8 + }, + qScale, qOffset); + + std::vector 
expectedOutputValues = armnnUtils::QuantizedVector( + { + 1, 1, 2, 2, + 1, 1, 2, 2, + 3, 3, 4, 4, + 3, 3, 4, 4, + + 1, 1, 2, 2, + 1, 1, 2, 2, + 3, 3, 4, 4, + 3, 3, 4, 4, + + 5, 5, 6, 6, + 5, 5, 6, 6, + 7, 7, 8, 8, + 7, 7, 8, 8, + + 5, 5, 6, 6, + 5, 5, 6, 6, + 7, 7, 8, 8, + 7, 7, 8, 8 + }, + qScale, qOffset); + + std::vector> padList; + padList.push_back(std::pair(1,1)); + padList.push_back(std::pair(1,1)); + padList.push_back(std::pair(1,1)); + + return MirrorPad3dTestCommon(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + inputValues, + expectedOutputValues, + padList, + armnn::PaddingMode::Symmetric); +} + +template> +LayerTestResult PadReflect3dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + float qScale, + int32_t qOffset) +{ + const armnn::TensorShape inputShape{ 2, 2, 2 }; + const armnn::TensorShape outputShape{ 4, 4, 4 }; + + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); + + std::vector inputValues = armnnUtils::QuantizedVector( + { + // Channel 0, Height (2) x Width (2) + 1, 2, + 3, 4, + + // Channel 1, Height (2) x Width (2) + 5, 6, + 7, 8 + }, + qScale, qOffset); + + std::vector expectedOutputValues = armnnUtils::QuantizedVector( + { + 8, 7, 8, 7, + 6, 5, 6, 5, + 8, 7, 8, 7, + 6, 5, 6, 5, + + 4, 3, 4, 3, + 2, 1, 2, 1, + 4, 3, 4, 3, + 2, 1, 2, 1, + + 8, 7, 8, 7, + 6, 5, 6, 5, + 8, 7, 8, 7, + 6, 5, 6, 5, + + 4, 3, 4, 3, + 2, 1, 2, 1, + 4, 3, 4, 3, + 2, 1, 2, 1 + }, + qScale, qOffset); + + std::vector> padList; + padList.push_back(std::pair(1,1)); + padList.push_back(std::pair(1,1)); + padList.push_back(std::pair(1,1)); + + return MirrorPad3dTestCommon(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + 
inputValues, + expectedOutputValues, + padList, + armnn::PaddingMode::Reflect); +} + +template> +LayerTestResult PadSymmetric4dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + float qScale, + int32_t qOffset) +{ + const armnn::TensorShape inputShape{ 2, 2, 2, 2 }; + const armnn::TensorShape outputShape{ 6, 6, 6, 6 }; + + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); + + std::vector inputValues = armnnUtils::QuantizedVector( + { + // Batch 0, Channel 0, Height (2) x Width (2) + 1, 2, + 3, 4, + + // Batch 0, Channel 1, Height (2) x Width (2) + 5, 6, + 7, 8, + + // Batch 1, Channel 0, Height (2) x Width (2) + 9, 10, + 11, 12, + + // Batch 1, Channel 1, Height (2) x Width (2) + 13, 14, + 15, 16, + }, + qScale, qOffset); + + std::vector expectedOutputValues = armnnUtils::QuantizedVector( + { + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + + 8, 7, 7, 8, 8, 7, + 6, 
5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + + + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 
15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, 
+ 2, 1, 1, 2, 2, 1 + }, + qScale, qOffset); + + std::vector> padList; + padList.push_back(std::pair(2,2)); + padList.push_back(std::pair(2,2)); + padList.push_back(std::pair(2,2)); + padList.push_back(std::pair(2,2)); + + return MirrorPad4dTestCommon(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + inputValues, + expectedOutputValues, + padList, + armnn::PaddingMode::Symmetric); +} + +template> +LayerTestResult PadReflect4dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + float qScale, + int32_t qOffset) +{ + const armnn::TensorShape inputShape{ 2, 2, 2, 2 }; + const armnn::TensorShape outputShape{ 4, 4, 4, 4 }; + + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); + + std::vector inputValues = armnnUtils::QuantizedVector( + { + // Batch 0, Channel 0, Height (2) x Width (2) + 1, 2, + 3, 4, + + // Batch 0, Channel 1, Height (2) x Width (2) + 5, 6, + 7, 8, + + // Batch 1, Channel 0, Height (2) x Width (2) + 9, 10, + 11, 12, + + // Batch 1, Channel 1, Height (2) x Width (2) + 13, 14, + 15, 16, + }, + qScale, qOffset); + + std::vector expectedOutputValues = armnnUtils::QuantizedVector( + { + 16, 15, 16, 15, + 14, 13, 14, 13, + 16, 15, 16, 15, + 14, 13, 14, 13, + + 12, 11, 12, 11, + 10, 9, 10, 9, + 12, 11, 12, 11, + 10, 9, 10, 9, + + 16, 15, 16, 15, + 14, 13, 14, 13, + 16, 15, 16, 15, + 14, 13, 14, 13, + + 12, 11, 12, 11, + 10, 9, 10, 9, + 12, 11, 12, 11, + 10, 9, 10, 9, + + + 8, 7, 8, 7, + 6, 5, 6, 5, + 8, 7, 8, 7, + 6, 5, 6, 5, + + 4, 3, 4, 3, + 2, 1, 2, 1, + 4, 3, 4, 3, + 2, 1, 2, 1, + + 8, 7, 8, 7, + 6, 5, 6, 5, + 8, 7, 8, 7, + 6, 5, 6, 5, + + 4, 3, 4, 3, + 2, 1, 2, 1, + 4, 3, 4, 3, + 2, 1, 2, 1, + + + 16, 15, 16, 15, + 14, 13, 14, 13, + 16, 15, 16, 15, + 14, 13, 14, 13, + 
+ 12, 11, 12, 11, + 10, 9, 10, 9, + 12, 11, 12, 11, + 10, 9, 10, 9, + + 16, 15, 16, 15, + 14, 13, 14, 13, + 16, 15, 16, 15, + 14, 13, 14, 13, + + 12, 11, 12, 11, + 10, 9, 10, 9, + 12, 11, 12, 11, + 10, 9, 10, 9, + + + 8, 7, 8, 7, + 6, 5, 6, 5, + 8, 7, 8, 7, + 6, 5, 6, 5, + + 4, 3, 4, 3, + 2, 1, 2, 1, + 4, 3, 4, 3, + 2, 1, 2, 1, + + 8, 7, 8, 7, + 6, 5, 6, 5, + 8, 7, 8, 7, + 6, 5, 6, 5, + + 4, 3, 4, 3, + 2, 1, 2, 1, + 4, 3, 4, 3, + 2, 1, 2, 1 + }, + qScale, qOffset); + + std::vector> padList; + padList.push_back(std::pair(1,1)); + padList.push_back(std::pair(1,1)); + padList.push_back(std::pair(1,1)); + padList.push_back(std::pair(1,1)); + + return MirrorPad4dTestCommon(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + inputValues, + expectedOutputValues, + padList, + armnn::PaddingMode::Reflect); +} + +LayerTestResult PadSymmetricFloat16( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + using namespace half_float::literal; + + const armnn::TensorShape inputShape{ 3, 3 }; + const armnn::TensorShape outputShape{ 5, 7 }; + + const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float16); + const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float16); + + const std::vector inputValues = + { + 1._h, 2._h, 3._h, + 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h + }; + + std::vector expectedOutputValues = + { + 2._h, 1._h, 1._h, 2._h, 3._h, 3._h, 2._h, + 2._h, 1._h, 1._h, 2._h, 3._h, 3._h, 2._h, + 5._h, 4._h, 4._h, 5._h, 6._h, 6._h, 5._h, + 8._h, 7._h, 7._h, 8._h, 9._h, 9._h, 8._h, + 8._h, 7._h, 7._h, 8._h, 9._h, 9._h, 8._h, + }; + + std::vector> padList; + padList.push_back(std::pair(1,1)); + padList.push_back(std::pair(2,2)); + + return MirrorPad2dTestCommon(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + inputValues, + 
expectedOutputValues, + padList, + armnn::PaddingMode::Symmetric); +} + +LayerTestResult PadReflectFloat16( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + using namespace half_float::literal; + + const armnn::TensorShape inputShape{ 3, 3 }; + const armnn::TensorShape outputShape{ 7, 5 }; + + const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float16); + const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float16); + + const std::vector inputValues = + { + 1._h, 2._h, 3._h, + 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h + }; + + std::vector expectedOutputValues = + { + 8._h, 7._h, 8._h, 9._h, 8._h, + 5._h, 4._h, 5._h, 6._h, 5._h, + 2._h, 1._h, 2._h, 3._h, 2._h, + 5._h, 4._h, 5._h, 6._h, 5._h, + 8._h, 7._h, 8._h, 9._h, 8._h, + 5._h, 4._h, 5._h, 6._h, 5._h, + 2._h, 1._h, 2._h, 3._h, 2._h, + }; + + std::vector> padList; + padList.push_back(std::pair(2,2)); + padList.push_back(std::pair(1,1)); + + return MirrorPad2dTestCommon(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + inputValues, + expectedOutputValues, + padList, + armnn::PaddingMode::Reflect); +} + +// +// Implementation functions +// + +LayerTestResult PadSymmetric2dFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric2dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0); +} + +LayerTestResult PadReflect2dFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect2dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0); +} + +LayerTestResult PadSymmetric3dFloat32Test( + 
armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric3dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0); +} + +LayerTestResult PadReflect3dFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect3dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0); +} + +LayerTestResult PadSymmetric3dUint8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric3dTest( + workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128); +} + +LayerTestResult PadReflect3dUint8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect3dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128); +} + +LayerTestResult PadSymmetric3dInt8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric3dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64); +} + +LayerTestResult PadReflect3dInt8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect3dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64); +} + +LayerTestResult PadSymmetric4dFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& 
memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric4dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0); +} + +LayerTestResult PadReflect4dFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect4dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0); +} + +LayerTestResult PadSymmetric4dBFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric4dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0); +} + +LayerTestResult PadReflect4dBFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect4dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0); +} + +LayerTestResult PadSymmetric4dUint8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric4dTest( + workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128); +} + +LayerTestResult PadReflect4dUint8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect4dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128); +} + +LayerTestResult PadSymmetric4dInt8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return 
PadSymmetric4dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64); +} + +LayerTestResult PadReflect4dInt8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect4dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64); +} + +LayerTestResult PadSymmetric4dInt16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric4dTest(workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 0); +} + +LayerTestResult PadReflect4dInt16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect4dTest(workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 0); +} + +LayerTestResult PadSymmetricFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetricFloat16(workloadFactory, memoryManager, tensorHandleFactory); +} + +LayerTestResult PadReflectFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflectFloat16(workloadFactory, memoryManager, tensorHandleFactory); +} diff --git a/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp new file mode 100644 index 0000000000..52898b820c --- /dev/null +++ b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp @@ -0,0 +1,117 @@ +// +// Copyright © 2021 Arm Ltd and Contributors. 
All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "LayerTestResult.hpp" + +#include + +#include + +#include + +#include +#include + +LayerTestResult PadSymmetric2dFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadReflect2dFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadSymmetric3dFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadReflect3dFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadSymmetric3dUint8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadReflect3dUint8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadSymmetric3dInt8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadReflect3dInt8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadSymmetric4dFloat32Test( + armnn::IWorkloadFactory& 
workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadReflect4dFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadSymmetric4dBFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadReflect4dBFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadSymmetric4dUint8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadReflect4dUint8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadSymmetric4dInt8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadReflect4dInt8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadSymmetric4dInt16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadReflect4dInt16Test( + armnn::IWorkloadFactory& workloadFactory, + 
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadSymmetricFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +LayerTestResult PadReflectFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); \ No newline at end of file diff --git a/src/backends/cl/workloads/ClFillWorkload.cpp b/src/backends/cl/workloads/ClFillWorkload.cpp index 8cb2db4b25..ea42dcfc8b 100644 --- a/src/backends/cl/workloads/ClFillWorkload.cpp +++ b/src/backends/cl/workloads/ClFillWorkload.cpp @@ -29,7 +29,7 @@ ClFillWorkload::ClFillWorkload(const FillQueueDescriptor& descriptor, m_Data.ValidateInputsOutputs("ClFillWorkload", 1, 1); arm_compute::ICLTensor& output = static_cast(this->m_Data.m_Outputs[0])->GetTensor(); - arm_compute::PixelValue pixelValue = GetPixelValue(output, descriptor.m_Parameters.m_Value); + arm_compute::PixelValue pixelValue = GetPixelValue(output.info(), descriptor.m_Parameters.m_Value); m_Layer.configure(clCompileContext, &output, pixelValue); } diff --git a/src/backends/cl/workloads/ClPadWorkload.cpp b/src/backends/cl/workloads/ClPadWorkload.cpp index 10c8907d43..46975102db 100644 --- a/src/backends/cl/workloads/ClPadWorkload.cpp +++ b/src/backends/cl/workloads/ClPadWorkload.cpp @@ -39,7 +39,7 @@ ClPadWorkload::ClPadWorkload(const PadQueueDescriptor& descriptor, arm_compute::PaddingList padList = static_cast(reversed_PadList); - arm_compute::PixelValue pixelValue = GetPixelValue(input, descriptor.m_Parameters.m_PadValue); + arm_compute::PixelValue pixelValue = GetPixelValue(input.info(), descriptor.m_Parameters.m_PadValue); m_Layer.configure(clCompileContext, &input, &output, padList, pixelValue); } diff 
--git a/src/backends/neon/workloads/NeonFillWorkload.cpp b/src/backends/neon/workloads/NeonFillWorkload.cpp index 0a3c7f0c88..3cfa56ab54 100644 --- a/src/backends/neon/workloads/NeonFillWorkload.cpp +++ b/src/backends/neon/workloads/NeonFillWorkload.cpp @@ -28,7 +28,7 @@ NeonFillWorkload::NeonFillWorkload(const FillQueueDescriptor& descriptor, const m_Data.ValidateInputsOutputs("NeonFillWorkload", 1, 1); arm_compute::ITensor& output = static_cast(m_Data.m_Outputs[0])->GetTensor(); - arm_compute::PixelValue pixelValue = GetPixelValue(output, descriptor.m_Parameters.m_Value); + arm_compute::PixelValue pixelValue = GetPixelValue(output.info(), descriptor.m_Parameters.m_Value); auto layer = std::make_unique(); layer->configure(&output, pixelValue); diff --git a/src/backends/neon/workloads/NeonPadWorkload.cpp b/src/backends/neon/workloads/NeonPadWorkload.cpp index b378d5f843..42fc42ba5c 100644 --- a/src/backends/neon/workloads/NeonPadWorkload.cpp +++ b/src/backends/neon/workloads/NeonPadWorkload.cpp @@ -38,7 +38,7 @@ NeonPadWorkload::NeonPadWorkload(const PadQueueDescriptor& descriptor, const Wor arm_compute::PaddingList padList = static_cast(reversed_PadList); - arm_compute::PixelValue pixelValue = GetPixelValue(input, descriptor.m_Parameters.m_PadValue); + arm_compute::PixelValue pixelValue = GetPixelValue(input.info(), descriptor.m_Parameters.m_PadValue); auto layer = std::make_unique(); layer->configure(&input, &output, padList, pixelValue); diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk index f8169a6c0c..7049279557 100644 --- a/src/backends/reference/backend.mk +++ b/src/backends/reference/backend.mk @@ -41,6 +41,7 @@ BACKEND_SOURCES := \ workloads/Lstm.cpp \ workloads/LstmUtils.cpp \ workloads/Concatenate.cpp \ + workloads/MirrorPad.cpp \ workloads/Pad.cpp \ workloads/Pooling2d.cpp \ workloads/PreluImpl.cpp \ diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp index 
cb31b37161..5993270173 100644 --- a/src/backends/reference/test/RefLayerTests.cpp +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -1415,7 +1415,7 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat16_2, LogSoftmaxTest2) ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat16_4, LogSoftmaxTest4) -// Pad +// Pad - Constant ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat162d, PadBFloat162dTest) ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat162dCustomPadding, PadBFloat162dCustomPaddingTest) ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat163d, PadBFloat163dTest) @@ -1445,6 +1445,31 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(PadInt84d, PadInt84dTest) ARMNN_AUTO_TEST_CASE_WITH_THF(PadQAsymmS8, PadQAsymmTestCommon, -2.0f, 3, 0.0f) ARMNN_AUTO_TEST_CASE_WITH_THF(PadQAsymmS8CustomPadding, PadQAsymmTestCommon, -2.0f, 3, 2.0f) +// Pad - Symmetric & Reflect +ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric2dFloat32, PadSymmetric2dFloat32Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect2dFloat32, PadReflect2dFloat32Test) + +ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric3dFloat32, PadSymmetric3dFloat32Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect3dFloat32, PadReflect3dFloat32Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric3dUint8, PadSymmetric3dUint8Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect3dUint8, PadReflect3dUint8Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric3dInt8, PadSymmetric3dInt8Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect3dInt8, PadReflect3dInt8Test) + +ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dFloat32, PadSymmetric4dFloat32Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dFloat32, PadReflect4dFloat32Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dBFloat16, PadSymmetric4dBFloat16Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dBFloat16, PadReflect4dBFloat16Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dUint8, PadSymmetric4dUint8Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dUint8, PadReflect4dUint8Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dInt8, PadSymmetric4dInt8Test) 
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dInt8, PadReflect4dInt8Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dInt16, PadSymmetric4dInt16Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dInt16, PadReflect4dInt16Test) + +ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetricFloat16, PadSymmetricFloat16Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflectFloat16, PadReflectFloat16Test) + // Constant ARMNN_AUTO_TEST_CASE_WITH_THF(Constant, ConstantTest) ARMNN_AUTO_TEST_CASE_WITH_THF(ConstantUint8, ConstantUint8CustomQuantizationScaleAndOffsetTest) diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt index 5727291be3..f212522895 100644 --- a/src/backends/reference/workloads/CMakeLists.txt +++ b/src/backends/reference/workloads/CMakeLists.txt @@ -52,6 +52,8 @@ list(APPEND armnnRefBackendWorkloads_sources Concatenate.hpp Concatenate.cpp Minimum.hpp + MirrorPad.cpp + MirrorPad.hpp Pad.cpp Pad.hpp Pooling2d.cpp diff --git a/src/backends/reference/workloads/MirrorPad.cpp b/src/backends/reference/workloads/MirrorPad.cpp new file mode 100644 index 0000000000..7388fed147 --- /dev/null +++ b/src/backends/reference/workloads/MirrorPad.cpp @@ -0,0 +1,199 @@ +// +// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "MirrorPad.hpp" + +#include "BaseIterator.hpp" +#include "Decoders.hpp" +#include "Encoders.hpp" + +namespace +{ + +// Convert a linear index into n-dimensional coordinates. +// E.g. index = 2 returns [0, 0, 2]. 
+inline std::vector IndexToCoord(const armnn::TensorShape& shape, unsigned int index) +{ + unsigned int numOfElements = shape.GetNumElements(); + + ARMNN_ASSERT_MSG(index <= numOfElements, "Index has to be in [0, num_elements]"); + ARMNN_ASSERT_MSG(numOfElements != 0, "Cannot create coordinate from empty shape"); + + std::vector coord(shape.GetNumDimensions()); + for(unsigned int i = 0; i < shape.GetNumDimensions(); ++i) + { + numOfElements /= shape[i]; + coord[i] = index / numOfElements; + index %= numOfElements; + } + + return coord; +} + +// Returns the index of a given coordinate. +// E.g. [0, 0, 2] returns 2. +inline unsigned int CoordToIndex(const armnn::TensorShape& shape, const std::vector& coord) +{ + ARMNN_ASSERT_MSG(shape.GetNumDimensions() != 0, "Cannot get index from empty shape"); + ARMNN_ASSERT_MSG(coord.size() != 0, "Cannot get index of empty coordinate"); + + unsigned int index = 0; + unsigned int dimSize = 1; + + for (unsigned int i = shape.GetNumDimensions(); i > 0; --i) + { + index += coord[i - 1] * dimSize; + dimSize *= shape[i - 1]; + } + + return index; +} + +} // anonymous namespace + +namespace armnn +{ + +void MirrorPad(const TensorInfo& inputInfo, + const TensorInfo& outputInfo, + const ITensorHandle* inputHandle, + ITensorHandle* outputHandle, + const PadQueueDescriptor& data) +{ + auto padList = data.m_Parameters.m_PadList; + PaddingMode paddingMode = data.m_Parameters.m_PaddingMode; + + TensorShape outputShape = outputInfo.GetShape(); + TensorShape inputShape = inputInfo.GetShape(); + + unsigned int numOutputElements = outputInfo.GetNumElements(); + unsigned int numInputDimensions = inputShape.GetNumDimensions(); + assert(numInputDimensions == outputShape.GetNumDimensions()); + + // If padding mode is Reflect then both paddings must be no greater than inputShape(i) - 1. + // If padding mode is Symmetric then both paddings must be no greater than inputShape(i). 
+ const unsigned int isReflect = static_cast(paddingMode == PaddingMode::Reflect); + for(unsigned int i = 0; i < padList.size(); ++i) + { + if(padList.at(i).first > (inputShape[i] - isReflect) || + padList.at(i).second > (inputShape[i] - isReflect)) + { + throw armnn::InvalidArgumentException("Paddings must be less (Reflect) or " + "equal (Symmetric) to the dimension size."); + } + } + + auto inputData = MakeDecoder(inputInfo, inputHandle->Map()); + auto outData = MakeEncoder(outputInfo, outputHandle->Map()); + + Decoder& input = *inputData; + Encoder& output = *outData; + + for(unsigned int idx = 0; idx < numOutputElements; ++idx) + { + // Get the coordinates of the current index in vector form. E.g inx 1 = [0, 0, 0, 1 ] + const std::vector coord = IndexToCoord(outputShape, idx); + + std::vector dimensions; + std::vector coords; + + for(unsigned int i = 0; i < numInputDimensions; ++i) + { + dimensions.emplace_back(i); + coords.emplace_back(coord[i]); + } + + auto isInPadding = [&](unsigned int i) + { + return (coords[i] < padList[i].first || coords[i] > inputShape[i] + padList[i].first - 1); + }; + + auto getReflectIndex = [&](unsigned int i) -> unsigned int + { + if(isInPadding(i)) + { + if(coords[i] < padList[i].first) + { + return padList[i].first - coords[i]; + } + else + { + return 2 * inputShape[i] + padList[i].first - 2 - coords[i]; + } + } + return coords[i] - padList[i].first; + }; + + auto getSymmetricIndex = [&](unsigned int i) -> unsigned int + { + if(isInPadding(i)) + { + if(coords[i] < padList[i].first) + { + return padList[i].first - coords[i] - 1; + } + else + { + return 2 * inputShape[i] + padList[i].first - 1 - coords[i]; + } + } + return coords[i] - padList[i].first; + }; + + // Location of the value in the input tensor to use in the output. + std::vector coordOfInput; + + // any_of works as a loop here to check if any of the dimensions are in the padding. 
+ // If dimensions is in the padding area, then create the coordinates of the location in the + // input tensor to use in the output. + // E.g. + // Input tensor = [ 1, 2, 3 ], Rank = 1. + // Output tensor = [ 2, 1, 2, 3, 1 ] if Reflect or [ 1, 1, 2, 3, 3 ] if Symmetric with a padding of (1, 1). + // So it will either return [ 1 ] or [ 0 ] which is used to set the first value in the output tensor and so on. + if(std::any_of(dimensions.begin(), dimensions.end(), isInPadding)) + { + switch(paddingMode) + { + case PaddingMode::Reflect: + { + for(unsigned int i = 0; i < numInputDimensions; ++i) + { + coordOfInput.emplace_back(getReflectIndex(i)); + } + break; + } + case PaddingMode::Symmetric: + { + for(unsigned int i = 0; i < numInputDimensions; ++i) + { + coordOfInput.emplace_back(getSymmetricIndex(i)); + } + break; + } + default: + throw InvalidArgumentException("Padding mode not supported."); + break; + } + } + else + { + for(unsigned int i = 0; i < numInputDimensions; ++i) + { + coordOfInput.emplace_back(coord[i] - padList[i].first); + } + } + + // Set output value using the coordinate of the input value to use. + const unsigned int indexOfInput = CoordToIndex(inputShape, coordOfInput); + + input[indexOfInput]; + auto inputValue = input.Get(); + + output[idx]; + output.Set(inputValue); + } +} + +} //namespace armnn \ No newline at end of file diff --git a/src/backends/reference/workloads/MirrorPad.hpp b/src/backends/reference/workloads/MirrorPad.hpp new file mode 100644 index 0000000000..3deaf1d5fd --- /dev/null +++ b/src/backends/reference/workloads/MirrorPad.hpp @@ -0,0 +1,22 @@ +// +// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "armnn/Tensor.hpp" + +#include +#include + +namespace armnn +{ + +void MirrorPad(const TensorInfo& inputInfo, + const TensorInfo& outputInfo, + const ITensorHandle* inputHandle, + ITensorHandle* outputHandle, + const PadQueueDescriptor& data); + +} //namespace armnn diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp index f15306d1af..fd0728c8cd 100644 --- a/src/backends/reference/workloads/RefPadWorkload.cpp +++ b/src/backends/reference/workloads/RefPadWorkload.cpp @@ -5,6 +5,7 @@ #include "RefPadWorkload.hpp" +#include "MirrorPad.hpp" #include "Pad.hpp" #include "Profiling.hpp" #include "RefWorkloadUtils.hpp" @@ -29,11 +30,19 @@ void RefPadWorkload::Execute(std::vector inputs, std::vector