author     Matthew Sloyan <matthew.sloyan@arm.com>        2021-10-22 15:48:12 +0100
committer  Teresa Charlin <teresa.charlinreyes@arm.com>   2021-10-27 20:55:51 +0100
commit     af3a4ef77d8f330a995911b979417857514df62c (patch)
tree       97c022fcf407d49649d4dceb285be2d047056132
parent     2e5d0b2e2a212ceb803681b717cbaf821f5e0929 (diff)
download   armnn-af3a4ef77d8f330a995911b979417857514df62c.tar.gz
IVGCVSW-6469 Add MirrorPad TfLiteParser and TfLiteDelegate Support
Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: Ia1c97adb401c5381341408ec1e4da287ef2d48fe
-rw-r--r--  CMakeLists.txt                              1
-rw-r--r--  delegate/CMakeLists.txt                     1
-rw-r--r--  delegate/src/Pad.hpp                       42
-rw-r--r--  delegate/src/armnn_delegate.cpp             6
-rw-r--r--  delegate/src/test/MirrorPadTest.cpp       341
-rw-r--r--  delegate/src/test/PadTestHelper.hpp        16
-rw-r--r--  docs/01_01_parsers.dox                      1
-rw-r--r--  docs/01_03_delegate.dox                     2
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp     72
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.hpp      1
-rw-r--r--  src/armnnTfLiteParser/test/MirrorPad.cpp  138
11 files changed, 618 insertions, 3 deletions
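
Functionally, the change maps TFLite's MIRROR_PAD builtin onto Arm NN's existing Pad layer: the operator's paddings tensor fills PadDescriptor::m_PadList and the mirror mode selects PaddingMode::Reflect or PaddingMode::Symmetric. Below is a minimal sketch of building the equivalent layer directly against the INetwork API; the network and inputLayer variables are assumed to exist and are for illustration only.

// Minimal sketch (not part of the patch): build the layer the parser/delegate
// produce for a 3x3 Float32 input padded by 2 on every edge in Reflect mode.
// `network` (armnn::INetworkPtr) and `inputLayer` are assumed to already exist.
armnn::PadDescriptor descriptor;
descriptor.m_PadList     = { { 2, 2 }, { 2, 2 } };        // (before, after) per dimension
descriptor.m_PaddingMode = armnn::PaddingMode::Reflect;   // or armnn::PaddingMode::Symmetric

armnn::IConnectableLayer* padLayer = network->AddPadLayer(descriptor, "MirrorPad");
padLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 7, 7 }, armnn::DataType::Float32));
inputLayer->GetOutputSlot(0).Connect(padLayer->GetInputSlot(0));
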
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 972db42bdf..2def5fe8e0 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -708,6 +708,7 @@ if(BUILD_UNIT_TESTS)
src/armnnTfLiteParser/test/MaxPool2D.cpp
src/armnnTfLiteParser/test/Mean.cpp
src/armnnTfLiteParser/test/Minimum.cpp
+ src/armnnTfLiteParser/test/MirrorPad.cpp
src/armnnTfLiteParser/test/Multiplication.cpp
src/armnnTfLiteParser/test/Pack.cpp
src/armnnTfLiteParser/test/Pad.cpp
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 0178594bfe..54ddd61d3d 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -160,6 +160,7 @@ if(BUILD_UNIT_TESTS)
src/test/LogicalTestHelper.hpp
src/test/LstmTest.cpp
src/test/LstmTestHelper.hpp
+ src/test/MirrorPadTest.cpp
src/test/NormalizationTest.cpp
src/test/NormalizationTestHelper.hpp
src/test/PackTest.cpp
diff --git a/delegate/src/Pad.hpp b/delegate/src/Pad.hpp
index 431b8d33f2..78e07760fb 100644
--- a/delegate/src/Pad.hpp
+++ b/delegate/src/Pad.hpp
@@ -23,6 +23,7 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
switch(tfLitePadOperatorCode)
{
+ case kTfLiteBuiltinMirrorPad:
case kTfLiteBuiltinPad:
TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
break;
@@ -106,6 +107,47 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
return kTfLiteError;
}
}
+ else if (tfLitePadOperatorCode == kTfLiteBuiltinMirrorPad)
+ {
+ TfLiteMirrorPaddingParams* options = reinterpret_cast<TfLiteMirrorPaddingParams*>(tfLiteNode->builtin_data);
+
+
+ if (options->mode == TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect)
+ {
+ descriptor.m_PaddingMode = armnn::PaddingMode::Reflect;
+ }
+ else if (options->mode == TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingSymmetric)
+ {
+ descriptor.m_PaddingMode = armnn::PaddingMode::Symmetric;
+ }
+ else
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: PaddingMode must be either REFLECT or SYMMETRIC in operator #%d node #%d: ",
+ tfLitePadOperatorCode, nodeIndex);
+ }
+
+ // If padding mode is Reflect then both paddings must be no greater than inputShape(i) - 1.
+ // If padding mode is Symmetric then both paddings must be no greater than inputShape(i).
+ auto inputShape = inputTensorInfo.GetShape();
+ auto padList = descriptor.m_PadList;
+
+ const unsigned int isReflect =
+ static_cast<unsigned int>(descriptor.m_PaddingMode == armnn::PaddingMode::Reflect);
+ for(unsigned int i = 0; i < padList.size(); ++i)
+ {
+ if(padList.at(i).first > (inputShape[i] - isReflect) ||
+ padList.at(i).second > (inputShape[i] - isReflect))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Padding values must be less (Reflect) or "
+ "equal (Symmetric) to the dimension size in operator #%d node #%d: ",
+ tfLitePadOperatorCode, nodeIndex);
+ }
+ }
+ }
if (!delegateData.m_Network)
{
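
The validation added above applies the same bound that the parser enforces further down: with Reflect padding each edge can grow by at most inputShape[i] - 1 (the edge element is not mirrored onto itself), with Symmetric by at most inputShape[i]. A standalone sketch of that rule, with the helper name chosen for illustration:

// Illustrative helper (not in the patch) expressing the bound checked above.
// Reflect  : pad <= inputShape[i] - 1,  Symmetric: pad <= inputShape[i].
bool PadListFitsInput(const std::vector<std::pair<unsigned int, unsigned int>>& padList,
                      const armnn::TensorShape& inputShape,
                      armnn::PaddingMode paddingMode)
{
    const unsigned int isReflect = (paddingMode == armnn::PaddingMode::Reflect) ? 1u : 0u;
    for (unsigned int i = 0; i < padList.size(); ++i)
    {
        const unsigned int limit = inputShape[i] - isReflect;
        if (padList[i].first > limit || padList[i].second > limit)
        {
            return false;   // e.g. a pad of 3 on a size-3 axis fails in Reflect mode
        }
    }
    return true;
}
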
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
index e029e2c420..ae25430e0d 100644
--- a/delegate/src/armnn_delegate.cpp
+++ b/delegate/src/armnn_delegate.cpp
@@ -725,6 +725,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinMinimum);
+ case kTfLiteBuiltinMirrorPad:
+ return VisitPadOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinMirrorPad);
case kTfLiteBuiltinMul:
return VisitElementwiseBinaryOperator(delegateData,
tfLiteContext,
diff --git a/delegate/src/test/MirrorPadTest.cpp b/delegate/src/test/MirrorPadTest.cpp
new file mode 100644
index 0000000000..ca66181a30
--- /dev/null
+++ b/delegate/src/test/MirrorPadTest.cpp
@@ -0,0 +1,341 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "PadTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void MirrorPadSymmetric2dTest(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 3, 3 };
+ std::vector<int32_t> outputShape { 7, 7 };
+ std::vector<int32_t> paddingShape { 2, 2 };
+
+ std::vector<float> inputValues =
+ {
+ 1.0f, 2.0f, 3.0f,
+ 4.0f, 5.0f, 6.0f,
+ 7.0f, 8.0f, 9.0f
+ };
+
+ std::vector<float> expectedOutputValues =
+ {
+ 5.0f, 4.0f, 4.0f, 5.0f, 6.0f, 6.0f, 5.0f,
+ 2.0f, 1.0f, 1.0f, 2.0f, 3.0f, 3.0f, 2.0f,
+ 2.0f, 1.0f, 1.0f, 2.0f, 3.0f, 3.0f, 2.0f,
+ 5.0f, 4.0f, 4.0f, 5.0f, 6.0f, 6.0f, 5.0f,
+ 8.0f, 7.0f, 7.0f, 8.0f, 9.0f, 9.0f, 8.0f,
+ 8.0f, 7.0f, 7.0f, 8.0f, 9.0f, 9.0f, 8.0f,
+ 5.0f, 4.0f, 4.0f, 5.0f, 6.0f, 6.0f, 5.0f
+ };
+
+ std::vector<int32_t> paddingDim = { 2, 2, 2, 2 };
+
+ PadTest<float>(tflite::BuiltinOperator_MIRROR_PAD,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ paddingShape,
+ outputShape,
+ inputValues,
+ paddingDim,
+ expectedOutputValues,
+ 0, // Padding value - Not used in these tests.
+ 1.0f, // Scale
+ 0, // Offset
+ tflite::MirrorPadMode_SYMMETRIC);
+}
+
+void MirrorPadReflect2dTest(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 3, 3 };
+ std::vector<int32_t> outputShape { 7, 7 };
+ std::vector<int32_t> paddingShape { 2, 2 };
+
+ std::vector<float> inputValues =
+ {
+ 1.0f, 2.0f, 3.0f,
+ 4.0f, 5.0f, 6.0f,
+ 7.0f, 8.0f, 9.0f
+ };
+
+ std::vector<float> expectedOutputValues =
+ {
+ 9.0f, 8.0f, 7.0f, 8.0f, 9.0f, 8.0f, 7.0f,
+ 6.0f, 5.0f, 4.0f, 5.0f, 6.0f, 5.0f, 4.0f,
+ 3.0f, 2.0f, 1.0f, 2.0f, 3.0f, 2.0f, 1.0f,
+ 6.0f, 5.0f, 4.0f, 5.0f, 6.0f, 5.0f, 4.0f,
+ 9.0f, 8.0f, 7.0f, 8.0f, 9.0f, 8.0f, 7.0f,
+ 6.0f, 5.0f, 4.0f, 5.0f, 6.0f, 5.0f, 4.0f,
+ 3.0f, 2.0f, 1.0f, 2.0f, 3.0f, 2.0f, 1.0f
+ };
+
+ std::vector<int32_t> paddingDim = { 2, 2, 2, 2 };
+
+ PadTest<float>(tflite::BuiltinOperator_MIRROR_PAD,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ paddingShape,
+ outputShape,
+ inputValues,
+ paddingDim,
+ expectedOutputValues,
+ 0, // Padding value - Not used in these tests.
+ 1.0f, // Scale
+ 0, // Offset
+ tflite::MirrorPadMode_REFLECT);
+}
+
+void MirrorPadSymmetric3dTest(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 2, 2, 2 };
+ std::vector<int32_t> outputShape { 4, 4, 4 };
+ std::vector<int32_t> paddingShape { 3, 2 };
+
+ std::vector<float> inputValues =
+ {
+ // Channel 0, Height (2) x Width (2)
+ 1.0f, 2.0f,
+ 3.0f, 4.0f,
+
+ // Channel 1, Height (2) x Width (2)
+ 5.0f, 6.0f,
+ 7.0f, 8.0f
+ };
+
+ std::vector<float> expectedOutputValues =
+ {
+ 1.0f, 1.0f, 2.0f, 2.0f,
+ 1.0f, 1.0f, 2.0f, 2.0f,
+ 3.0f, 3.0f, 4.0f, 4.0f,
+ 3.0f, 3.0f, 4.0f, 4.0f,
+
+ 1.0f, 1.0f, 2.0f, 2.0f,
+ 1.0f, 1.0f, 2.0f, 2.0f,
+ 3.0f, 3.0f, 4.0f, 4.0f,
+ 3.0f, 3.0f, 4.0f, 4.0f,
+
+ 5.0f, 5.0f, 6.0f, 6.0f,
+ 5.0f, 5.0f, 6.0f, 6.0f,
+ 7.0f, 7.0f, 8.0f, 8.0f,
+ 7.0f, 7.0f, 8.0f, 8.0f,
+
+ 5.0f, 5.0f, 6.0f, 6.0f,
+ 5.0f, 5.0f, 6.0f, 6.0f,
+ 7.0f, 7.0f, 8.0f, 8.0f,
+ 7.0f, 7.0f, 8.0f, 8.0f
+ };
+
+ std::vector<int32_t> paddingDim = { 1, 1, 1, 1, 1, 1 };
+
+ PadTest<float>(tflite::BuiltinOperator_MIRROR_PAD,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ paddingShape,
+ outputShape,
+ inputValues,
+ paddingDim,
+ expectedOutputValues,
+ 0, // Padding value - Not used in these tests.
+ 1.0f, // Scale
+ 0, // Offset
+ tflite::MirrorPadMode_SYMMETRIC);
+}
+
+void MirrorPadReflect3dTest(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 2, 2, 2 };
+ std::vector<int32_t> outputShape { 4, 4, 4 };
+ std::vector<int32_t> paddingShape { 3, 2 };
+
+ std::vector<float> inputValues =
+ {
+ // Channel 0, Height (2) x Width (2)
+ 1.0f, 2.0f,
+ 3.0f, 4.0f,
+
+ // Channel 1, Height (2) x Width (2)
+ 5.0f, 6.0f,
+ 7.0f, 8.0f
+ };
+
+ std::vector<float> expectedOutputValues =
+ {
+ 8.0f, 7.0f, 8.0f, 7.0f,
+ 6.0f, 5.0f, 6.0f, 5.0f,
+ 8.0f, 7.0f, 8.0f, 7.0f,
+ 6.0f, 5.0f, 6.0f, 5.0f,
+
+ 4.0f, 3.0f, 4.0f, 3.0f,
+ 2.0f, 1.0f, 2.0f, 1.0f,
+ 4.0f, 3.0f, 4.0f, 3.0f,
+ 2.0f, 1.0f, 2.0f, 1.0f,
+
+ 8.0f, 7.0f, 8.0f, 7.0f,
+ 6.0f, 5.0f, 6.0f, 5.0f,
+ 8.0f, 7.0f, 8.0f, 7.0f,
+ 6.0f, 5.0f, 6.0f, 5.0f,
+
+ 4.0f, 3.0f, 4.0f, 3.0f,
+ 2.0f, 1.0f, 2.0f, 1.0f,
+ 4.0f, 3.0f, 4.0f, 3.0f,
+ 2.0f, 1.0f, 2.0f, 1.0f
+ };
+
+ std::vector<int32_t> paddingDim = { 1, 1, 1, 1, 1, 1 };
+
+ PadTest<float>(tflite::BuiltinOperator_MIRROR_PAD,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ paddingShape,
+ outputShape,
+ inputValues,
+ paddingDim,
+ expectedOutputValues,
+ 0, // Padding value - Not used in these tests.
+ 1.0f, // Scale
+ 0, // Offset
+ tflite::MirrorPadMode_REFLECT);
+}
+
+void MirrorPadSymmetricUint8Test(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 3, 3 };
+ std::vector<int32_t> outputShape { 5, 7 };
+ std::vector<int32_t> paddingShape { 2, 2 };
+
+ std::vector<uint8_t> inputValues =
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9
+ };
+
+ std::vector<uint8_t> expectedOutputValues =
+ {
+ 2, 1, 1, 2, 3, 3, 2,
+ 2, 1, 1, 2, 3, 3, 2,
+ 5, 4, 4, 5, 6, 6, 5,
+ 8, 7, 7, 8, 9, 9, 8,
+ 8, 7, 7, 8, 9, 9, 8,
+ };
+
+ std::vector<int32_t> paddingDim = { 1, 1, 2, 2 };
+
+ PadTest<uint8_t>(tflite::BuiltinOperator_MIRROR_PAD,
+ ::tflite::TensorType_UINT8,
+ backends,
+ inputShape,
+ paddingShape,
+ outputShape,
+ inputValues,
+ paddingDim,
+ expectedOutputValues,
+ 0, // Padding value - Not used in these tests.
+ 1.0f, // Scale
+ 1, // Offset
+ tflite::MirrorPadMode_SYMMETRIC);
+}
+
+void MirrorPadReflectInt8Test(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 3, 3 };
+ std::vector<int32_t> outputShape { 7, 5 };
+ std::vector<int32_t> paddingShape { 2, 2 };
+
+ std::vector<int8_t> inputValues =
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9
+ };
+
+ std::vector<int8_t> expectedOutputValues =
+ {
+ 8, 7, 8, 9, 8,
+ 5, 4, 5, 6, 5,
+ 2, 1, 2, 3, 2,
+ 5, 4, 5, 6, 5,
+ 8, 7, 8, 9, 8,
+ 5, 4, 5, 6, 5,
+ 2, 1, 2, 3, 2
+ };
+
+ std::vector<int32_t> paddingDim = { 2, 2, 1, 1 };
+
+ PadTest<int8_t>(tflite::BuiltinOperator_MIRROR_PAD,
+ ::tflite::TensorType_INT8,
+ backends,
+ inputShape,
+ paddingShape,
+ outputShape,
+ inputValues,
+ paddingDim,
+ expectedOutputValues,
+ 0, // Padding value - Not used in these tests.
+ 1.0f, // Scale
+ 1, // Offset
+ tflite::MirrorPadMode_REFLECT);
+}
+
+TEST_SUITE("MirrorPad_CpuRefTests")
+{
+
+TEST_CASE ("MirrorPadSymmetric2d_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ MirrorPadSymmetric2dTest(backends);
+}
+
+TEST_CASE ("MirrorPadReflect2d_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ MirrorPadReflect2dTest(backends);
+}
+
+TEST_CASE ("MirrorPadSymmetric3d_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ MirrorPadSymmetric3dTest(backends);
+}
+
+TEST_CASE ("MirrorPadReflect3d_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ MirrorPadReflect3dTest(backends);
+}
+
+TEST_CASE ("MirrorPadSymmetricUint8_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ MirrorPadSymmetricUint8Test(backends);
+}
+
+TEST_CASE ("MirrorPadSymmetricInt8_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ MirrorPadReflectInt8Test(backends);
+}
+
+} // TEST_SUITE("MirrorPad_CpuRefTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/PadTestHelper.hpp b/delegate/src/test/PadTestHelper.hpp
index 3d6e6493d9..5b9a1bcc36 100644
--- a/delegate/src/test/PadTestHelper.hpp
+++ b/delegate/src/test/PadTestHelper.hpp
@@ -25,6 +25,7 @@ template <typename T>
std::vector<char> CreatePadTfLiteModel(
tflite::BuiltinOperator padOperatorCode,
tflite::TensorType tensorType,
+ tflite::MirrorPadMode paddingMode,
const std::vector<int32_t>& inputTensorShape,
const std::vector<int32_t>& paddingTensorShape,
const std::vector<int32_t>& outputTensorShape,
@@ -87,7 +88,14 @@ std::vector<char> CreatePadTfLiteModel(
operatorInputs = {{ 0, 1 }};
subgraphInputs = {{ 0, 1 }};
operatorBuiltinOptions = CreatePadOptions(flatBufferBuilder).Union();
+ }
+ else if(padOperatorCode == tflite::BuiltinOperator_MIRROR_PAD)
+ {
+ operatorInputs = {{ 0, 1 }};
+ subgraphInputs = {{ 0, 1 }};
+ operatorBuiltinOptionsType = BuiltinOptions_MirrorPadOptions;
+ operatorBuiltinOptions = CreateMirrorPadOptions(flatBufferBuilder, paddingMode).Union();
}
else if (padOperatorCode == tflite::BuiltinOperator_PADV2)
{
@@ -116,7 +124,7 @@ std::vector<char> CreatePadTfLiteModel(
// create operator
const std::vector<int32_t> operatorOutputs{ 2 };
- flatbuffers::Offset <Operator> redefineOperator =
+ flatbuffers::Offset <Operator> paddingOperator =
CreateOperator(flatBufferBuilder,
0,
flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
@@ -130,7 +138,7 @@ std::vector<char> CreatePadTfLiteModel(
flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&redefineOperator, 1));
+ flatBufferBuilder.CreateVector(&paddingOperator, 1));
flatbuffers::Offset <flatbuffers::String> modelDescription =
flatBufferBuilder.CreateString("ArmnnDelegate: Pad Operator Model");
@@ -163,11 +171,13 @@ void PadTest(tflite::BuiltinOperator padOperatorCode,
std::vector<T>& expectedOutputValues,
T paddingValue,
float quantScale = 1.0f,
- int quantOffset = 0)
+ int quantOffset = 0,
+ tflite::MirrorPadMode paddingMode = tflite::MirrorPadMode_SYMMETRIC)
{
using namespace tflite;
std::vector<char> modelBuffer = CreatePadTfLiteModel<T>(padOperatorCode,
tensorType,
+ paddingMode,
inputShape,
paddingShape,
outputShape,
diff --git a/docs/01_01_parsers.dox b/docs/01_01_parsers.dox
index adc3051429..186ed6193a 100644
--- a/docs/01_01_parsers.dox
+++ b/docs/01_01_parsers.dox
@@ -149,6 +149,7 @@ The Arm NN SDK TensorFlow Lite parser currently supports the following operators
- MAXIMUM
- MEAN
- MINIMUM
+- MIRROR_PAD
- MUL
- NEG
- NOT_EQUAL
diff --git a/docs/01_03_delegate.dox b/docs/01_03_delegate.dox
index 2d30e653fa..b3caf8cbf8 100644
--- a/docs/01_03_delegate.dox
+++ b/docs/01_03_delegate.dox
@@ -109,6 +109,8 @@ The Arm NN SDK TensorFlow Lite delegate currently supports the following operato
- MINIMUM
+- MIRROR_PAD
+
- MUL
- NEG
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 7db5d85b13..125a763ff4 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -670,6 +670,7 @@ TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOpt
m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParserImpl::ParseMaximum;
m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParserImpl::ParseMean;
m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParserImpl::ParseMinimum;
+ m_ParserFunctions[tflite::BuiltinOperator_MIRROR_PAD] = &TfLiteParserImpl::ParseMirrorPad;
m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParserImpl::ParseMul;
m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParserImpl::ParseNeg;
m_ParserFunctions[tflite::BuiltinOperator_NOT_EQUAL] = &TfLiteParserImpl::ParseNotEqual;
@@ -2214,6 +2215,77 @@ void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
+void TfLiteParserImpl::ParseMirrorPad(size_t subgraphIndex, size_t operatorIndex)
+{
+ CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+ TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(inputs.size(), 2);
+
+ TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+
+ armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
+
+ armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
+ BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
+
+ std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
+ ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
+
+ size_t step = 2;
+ armnn::PadDescriptor desc;
+ for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
+ {
+ desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
+ }
+
+ const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+ const auto* options = operatorPtr->builtin_options.AsMirrorPadOptions();
+
+ if (options->mode == tflite::MirrorPadMode_REFLECT)
+ {
+ desc.m_PaddingMode = PaddingMode::Reflect;
+ }
+ else if (options->mode == tflite::MirrorPadMode_SYMMETRIC)
+ {
+ desc.m_PaddingMode = PaddingMode::Symmetric;
+ }
+ else
+ {
+ ARMNN_THROW_PARSE_EXCEPTION("PaddingMode must be either REFLECT or SYMMETRIC");
+ }
+
+ // If padding mode is Reflect then both paddings must be no greater than inputShape(i) - 1.
+ // If padding mode is Symmetric then both paddings must be no greater than inputShape(i).
+ auto inputShape = inputTensorInfo.GetShape();
+ auto padList = desc.m_PadList;
+
+ const unsigned int isReflect = static_cast<unsigned int>(desc.m_PaddingMode == PaddingMode::Reflect);
+ for(unsigned int i = 0; i < padList.size(); ++i)
+ {
+ if(padList.at(i).first > (inputShape[i] - isReflect) ||
+ padList.at(i).second > (inputShape[i] - isReflect))
+ {
+ ARMNN_THROW_PARSE_EXCEPTION("Padding values must be less (Reflect) or "
+ "equal (Symmetric) to the dimension size.");
+ }
+ }
+
+ auto layerName = fmt::format("MirrorPad:{}:{}", subgraphIndex, operatorIndex);
+ TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
+
+ IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
+ ARMNN_ASSERT(layer != nullptr);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+ auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
void TfLiteParserImpl::ParsePrelu(size_t subgraphIndex, size_t operatorIndex)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
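
ParseMirrorPad above reads the paddings tensor as a flat INT32 buffer and folds it into (before, after) pairs, two entries per input dimension, before picking the padding mode from MirrorPadOptions. A reduced sketch of just that decoding step, with the buffer contents assumed for illustration:

// Reduced sketch of the pad-list decoding in ParseMirrorPad, using an assumed buffer.
std::vector<unsigned int> padBuffer = { 1, 1, 2, 2 };   // [before0, after0, before1, after1]

const size_t step = 2;
armnn::PadDescriptor desc;
for (unsigned int i = 0; i < padBuffer.size() / step; ++i)
{
    desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
}
// desc.m_PadList is now { {1, 1}, {2, 2} }: dimension 0 grows by 1 on each side,
// dimension 1 by 2 on each side.
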
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 3d4fd6504f..512b87fd6c 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -149,6 +149,7 @@ private:
void ParseMaximum(size_t subgraphIndex, size_t operatorIndex);
void ParseMean(size_t subgraphIndex, size_t operatorIndex);
void ParseMinimum(size_t subgraphIndex, size_t operatorIndex);
+ void ParseMirrorPad(size_t subgraphIndex, size_t operatorIndex);
void ParseMul(size_t subgraphIndex, size_t operatorIndex);
void ParseNeg(size_t subgraphIndex, size_t operatorIndex);
void ParseNotEqual(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/MirrorPad.cpp b/src/armnnTfLiteParser/test/MirrorPad.cpp
new file mode 100644
index 0000000000..af0cbfdd60
--- /dev/null
+++ b/src/armnnTfLiteParser/test/MirrorPad.cpp
@@ -0,0 +1,138 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ParserFlatbuffersFixture.hpp"
+
+TEST_SUITE("TensorflowLiteParser_MirrorPad")
+{
+struct MirrorPadFixture : public ParserFlatbuffersFixture
+{
+ explicit MirrorPadFixture(const std::string& inputShape,
+ const std::string& outputShape,
+ const std::string& padListShape,
+ const std::string& padListData,
+ const std::string& padMode,
+ const std::string& dataType = "FLOAT32",
+ const std::string& scale = "1.0",
+ const std::string& offset = "0")
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [ { "builtin_code": "MIRROR_PAD" } ],
+ "subgraphs": [ {
+ "tensors": [
+ {
+ "shape": )" + inputShape + R"(,
+ "type": )" + dataType + R"(,
+ "buffer": 0,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ )" + scale + R"( ],
+ "zero_point": [ )" + offset + R"( ],
+ }
+ },
+ {
+ "shape": )" + outputShape + R"(,
+ "type": )" + dataType + R"(,
+ "buffer": 1,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ )" + scale + R"( ],
+ "zero_point": [ )" + offset + R"( ],
+ }
+ },
+ {
+ "shape": )" + padListShape + R"( ,
+ "type": "INT32",
+ "buffer": 2,
+ "name": "padList",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ 1.0 ],
+ "zero_point": [ 0 ],
+ }
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 1 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0, 2 ],
+ "outputs": [ 1 ],
+ "builtin_options_type": "MirrorPadOptions",
+ "builtin_options": {
+ "mode": )" + padMode + R"( ,
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { },
+ { },
+ { "data": )" + padListData + R"(, },
+ ]
+ }
+ )";
+ SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ }
+};
+
+struct SimpleMirrorPadSymmetricFixture : public MirrorPadFixture
+{
+ SimpleMirrorPadSymmetricFixture() : MirrorPadFixture("[ 3, 3 ]", "[ 7, 7 ]", "[ 2, 2 ]",
+ "[ 2,0,0,0, 2,0,0,0, 2,0,0,0, 2,0,0,0 ]",
+ "SYMMETRIC", "FLOAT32") {}
+};
+
+TEST_CASE_FIXTURE(SimpleMirrorPadSymmetricFixture, "ParseMirrorPadSymmetric")
+{
+ RunTest<2, armnn::DataType::Float32>
+ (0,
+ {{ "inputTensor", { 1.0f, 2.0f, 3.0f,
+ 4.0f, 5.0f, 6.0f,
+ 7.0f, 8.0f, 9.0f }}},
+
+ {{ "outputTensor", { 5.0f, 4.0f, 4.0f, 5.0f, 6.0f, 6.0f, 5.0f,
+ 2.0f, 1.0f, 1.0f, 2.0f, 3.0f, 3.0f, 2.0f,
+ 2.0f, 1.0f, 1.0f, 2.0f, 3.0f, 3.0f, 2.0f,
+ 5.0f, 4.0f, 4.0f, 5.0f, 6.0f, 6.0f, 5.0f,
+ 8.0f, 7.0f, 7.0f, 8.0f, 9.0f, 9.0f, 8.0f,
+ 8.0f, 7.0f, 7.0f, 8.0f, 9.0f, 9.0f, 8.0f,
+ 5.0f, 4.0f, 4.0f, 5.0f, 6.0f, 6.0f, 5.0f }}});
+}
+
+struct SimpleMirrorPadReflectFixture : public MirrorPadFixture
+{
+ SimpleMirrorPadReflectFixture() : MirrorPadFixture("[ 3, 3 ]", "[ 7, 7 ]", "[ 2, 2 ]",
+ "[ 2,0,0,0, 2,0,0,0, 2,0,0,0, 2,0,0,0 ]",
+ "REFLECT", "FLOAT32") {}
+};
+
+TEST_CASE_FIXTURE(SimpleMirrorPadReflectFixture, "ParseMirrorPadReflect")
+{
+ RunTest<2, armnn::DataType::Float32>
+ (0,
+ {{ "inputTensor", { 1.0f, 2.0f, 3.0f,
+ 4.0f, 5.0f, 6.0f,
+ 7.0f, 8.0f, 9.0f }}},
+
+ {{ "outputTensor", { 9.0f, 8.0f, 7.0f, 8.0f, 9.0f, 8.0f, 7.0f,
+ 6.0f, 5.0f, 4.0f, 5.0f, 6.0f, 5.0f, 4.0f,
+ 3.0f, 2.0f, 1.0f, 2.0f, 3.0f, 2.0f, 1.0f,
+ 6.0f, 5.0f, 4.0f, 5.0f, 6.0f, 5.0f, 4.0f,
+ 9.0f, 8.0f, 7.0f, 8.0f, 9.0f, 8.0f, 7.0f,
+ 6.0f, 5.0f, 4.0f, 5.0f, 6.0f, 5.0f, 4.0f,
+ 3.0f, 2.0f, 1.0f, 2.0f, 3.0f, 2.0f, 1.0f }}});
+}
+
+}
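
With the parser and delegate hooks in place, a TFLite model containing MIRROR_PAD should load through the usual entry points with no operator-specific code. A minimal end-to-end loading sketch, assuming a file named model.tflite exists and the standard Arm NN headers are available; the file name and backend choice are illustrative only:

// Minimal loading sketch (illustrative only): "model.tflite" and the CpuRef
// backend choice are assumptions, not part of this change.
#include <armnn/ArmNN.hpp>
#include <armnnTfLiteParser/ITfLiteParser.hpp>

int main()
{
    auto parser = armnnTfLiteParser::ITfLiteParser::Create();
    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");

    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network, { armnn::Compute::CpuRef }, runtime->GetDeviceSpec());

    armnn::NetworkId networkId = 0;
    runtime->LoadNetwork(networkId, std::move(optNet));
    return 0;
}
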