From 958024be8f5c54f6e2a2930d40da62fda451bba7 Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat
Date: Thu, 17 Dec 2020 12:17:58 +0000
Subject: IVGCVSW-5383 TfLiteDelegate: Implement Pad and PadV2 operators

* Add Pad and PadV2 operators support to Armnn Delegate
* Add dimension check to CompareOutputData test utility
* Unit tests

Signed-off-by: Narumol Prangnawarat
Change-Id: I9d00eb08f71e791498908fcbdb9de561e1c01aef
---
 delegate/src/test/PadTest.cpp       | 606 ++++++++++++++++++++++++++++++++++++
 delegate/src/test/PadTestHelper.hpp | 214 +++++++++++++
 delegate/src/test/TestUtils.hpp     |   3 +
 3 files changed, 823 insertions(+)
 create mode 100644 delegate/src/test/PadTest.cpp
 create mode 100644 delegate/src/test/PadTestHelper.hpp

(limited to 'delegate/src/test')

diff --git a/delegate/src/test/PadTest.cpp b/delegate/src/test/PadTest.cpp
new file mode 100644
index 0000000000..4721b685cc
--- /dev/null
+++ b/delegate/src/test/PadTest.cpp
@@ -0,0 +1,606 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "PadTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void Pad2dTest(std::vector<armnn::BackendId>& backends,
+               tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+               float pad = 0.0f)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 2 };
+    std::vector<int32_t> outputShape { 3, 5, 6 };
+    std::vector<int32_t> paddingShape { 3, 2 };
+
+    std::vector<float> inputValues = { 0.0f,  4.0f,
+                                       2.0f, -5.0f,
+                                       6.0f,  1.0f,
+                                       5.0f, -2.0f };
+
+    std::vector<float> expectedOutputValues = { pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, 0.0f, 4.0f, pad, pad,
+                                                pad, pad, 2.0f, -5.0f, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, 6.0f, 1.0f, pad, pad,
+                                                pad, pad, 5.0f, -2.0f, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad };
+
+    std::vector<int32_t> paddingDim = { 0, 1, 2, 1, 2, 2 };
+
+    PadTest<float>(padOperatorCode,
+                   ::tflite::TensorType_FLOAT32,
+                   backends,
+                   inputShape,
+                   paddingShape,
+                   outputShape,
+                   inputValues,
+                   paddingDim,
+                   expectedOutputValues,
+                   pad);
+}
+
+void Pad3dTest(std::vector<armnn::BackendId>& backends,
+               tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+               float pad = 0.0f)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 2 };
+    std::vector<int32_t> outputShape { 3, 5, 6 };
+    std::vector<int32_t> paddingShape { 3, 2 };
+
+    std::vector<float> inputValues = { 0.0f, 4.0f,
+                                       2.0f, 5.0f,
+                                       6.0f, 1.0f,
+                                       5.0f, 2.0f };
+
+    std::vector<float> expectedOutputValues = { pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, 0.0f, 4.0f, pad, pad,
+                                                pad, pad, 2.0f, 5.0f, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, 6.0f, 1.0f, pad, pad,
+                                                pad, pad, 5.0f, 2.0f, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad };
+
+    std::vector<int32_t> paddingDim = { 0, 1, 2, 1, 2, 2 };
+
+    PadTest<float>(padOperatorCode,
+                   ::tflite::TensorType_FLOAT32,
+                   backends,
+                   inputShape,
+                   paddingShape,
+                   outputShape,
+                   inputValues,
+                   paddingDim,
+                   expectedOutputValues,
+                   pad);
+}
+
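+// The padding tensor holds one { front, back } pair per input dimension.
+// In Pad4dTest below, paddingDim { 1, 1, 2, 1, 3, 1, 1, 1 } therefore grows
+// the { 2, 2, 3, 2 } input to { 1+2+1, 2+2+1, 3+3+1, 1+2+1 } = { 4, 5, 7, 4 }.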
+void Pad4dTest(std::vector<armnn::BackendId>& backends,
+               tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+               float pad = 0.0f)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 3, 2 };
+    std::vector<int32_t> outputShape { 4, 5, 7, 4 };
+    std::vector<int32_t> paddingShape { 4, 2 };
+
+    std::vector<float> inputValues = { 0.0f,  1.0f,
+                                       2.0f,  3.0f,
+                                       4.0f,  5.0f,
+
+                                       6.0f,  7.0f,
+                                       8.0f,  9.0f,
+                                       10.0f, 11.0f,
+
+                                       12.0f, 13.0f,
+                                       14.0f, 15.0f,
+                                       16.0f, 17.0f,
+
+                                       18.0f, 19.0f,
+                                       20.0f, 21.0f,
+                                       22.0f, 23.0f };
+
+    std::vector<float> expectedOutputValues = { pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, 0.0f, 1.0f, pad,
+                                                pad, 2.0f, 3.0f, pad,
+                                                pad, 4.0f, 5.0f, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, 6.0f, 7.0f, pad,
+                                                pad, 8.0f, 9.0f, pad,
+                                                pad, 10.0f, 11.0f, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, 12.0f, 13.0f, pad,
+                                                pad, 14.0f, 15.0f, pad,
+                                                pad, 16.0f, 17.0f, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, 18.0f, 19.0f, pad,
+                                                pad, 20.0f, 21.0f, pad,
+                                                pad, 22.0f, 23.0f, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad };
+
+    std::vector<int32_t> paddingDim = { 1, 1, 2, 1, 3, 1, 1, 1 };
+
+    PadTest<float>(padOperatorCode,
+                   ::tflite::TensorType_FLOAT32,
+                   backends,
+                   inputShape,
+                   paddingShape,
+                   outputShape,
+                   inputValues,
+                   paddingDim,
+                   expectedOutputValues,
+                   pad);
+}
+
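+// For the quantized variants the tensors carry raw int8/uint8 values. TfLite
+// pads a quantized tensor with its zero point when no explicit padding value
+// is supplied, so the expected padded byte p defaults to the quantization
+// offset (3) rather than 0 in the two tests below.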
+void PadInt8Test(std::vector<armnn::BackendId>& backends,
+                 tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+                 int8_t paddingValue = 0,
+                 int8_t p = 3,
+                 float quantizationScale = -2.0f,
+                 int32_t quantizationOffset = 3)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 2 };
+    std::vector<int32_t> outputShape { 3, 5, 6 };
+    std::vector<int32_t> paddingShape { 3, 2 };
+
+    std::vector<int8_t> inputValues = { 0, 4,
+                                        2, -5,
+                                        6, 1,
+                                        5, -2 };
+
+    std::vector<int8_t> expectedOutputValues = { p, p, p, p, p, p,
+                                                 p, p, p, p, p, p,
+                                                 p, p, 0, 4, p, p,
+                                                 p, p, 2, -5, p, p,
+                                                 p, p, p, p, p, p,
+
+                                                 p, p, p, p, p, p,
+                                                 p, p, p, p, p, p,
+                                                 p, p, 6, 1, p, p,
+                                                 p, p, 5, -2, p, p,
+                                                 p, p, p, p, p, p,
+
+                                                 p, p, p, p, p, p,
+                                                 p, p, p, p, p, p,
+                                                 p, p, p, p, p, p,
+                                                 p, p, p, p, p, p,
+                                                 p, p, p, p, p, p };
+
+    std::vector<int32_t> paddingDim = { 0, 1, 2, 1, 2, 2 };
+
+    PadTest<int8_t>(padOperatorCode,
+                    ::tflite::TensorType_INT8,
+                    backends,
+                    inputShape,
+                    paddingShape,
+                    outputShape,
+                    inputValues,
+                    paddingDim,
+                    expectedOutputValues,
+                    paddingValue,
+                    quantizationScale,
+                    quantizationOffset);
+}
+
+void PadUint8Test(std::vector<armnn::BackendId>& backends,
+                  tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+                  uint8_t paddingValue = 0,
+                  uint8_t p = 3,
+                  float quantizationScale = -2.0f,
+                  int32_t quantizationOffset = 3)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 2 };
+    std::vector<int32_t> outputShape { 3, 5, 6 };
+    std::vector<int32_t> paddingShape { 3, 2 };
+
+    std::vector<uint8_t> inputValues = { 0, 4,
+                                         2, 5,
+                                         6, 1,
+                                         5, 2 };
+
+    std::vector<uint8_t> expectedOutputValues = { p, p, p, p, p, p,
+                                                  p, p, p, p, p, p,
+                                                  p, p, 0, 4, p, p,
+                                                  p, p, 2, 5, p, p,
+                                                  p, p, p, p, p, p,
+
+                                                  p, p, p, p, p, p,
+                                                  p, p, p, p, p, p,
+                                                  p, p, 6, 1, p, p,
+                                                  p, p, 5, 2, p, p,
+                                                  p, p, p, p, p, p,
+
+                                                  p, p, p, p, p, p,
+                                                  p, p, p, p, p, p,
+                                                  p, p, p, p, p, p,
+                                                  p, p, p, p, p, p,
+                                                  p, p, p, p, p, p };
+
+    std::vector<int32_t> paddingDim = { 0, 1, 2, 1, 2, 2 };
+
+    PadTest<uint8_t>(padOperatorCode,
+                     ::tflite::TensorType_UINT8,
+                     backends,
+                     inputShape,
+                     paddingShape,
+                     outputShape,
+                     inputValues,
+                     paddingDim,
+                     expectedOutputValues,
+                     paddingValue,
+                     quantizationScale,
+                     quantizationOffset);
+}
+
+TEST_SUITE("Pad_CpuRefTests")
+{
+
+TEST_CASE ("Pad2d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad2dTest(backends);
+}
+
+TEST_CASE ("Pad3d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad3dTest(backends);
+}
+
+TEST_CASE ("Pad4d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad4dTest(backends);
+}
+
+TEST_CASE ("Pad_Int8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    PadInt8Test(backends);
+}
+
+TEST_CASE ("Pad_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    PadUint8Test(backends);
+}
+
+TEST_CASE ("PadV22d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad2dTest(backends, tflite::BuiltinOperator_PADV2, -2.5);
+}
+
+TEST_CASE ("PadV23d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad3dTest(backends, tflite::BuiltinOperator_PADV2, 2.0);
+}
+
+TEST_CASE ("PadV24d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad4dTest(backends, tflite::BuiltinOperator_PADV2, -1.33);
+}
+
+TEST_CASE ("PadV2_Int8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    PadInt8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+TEST_CASE ("PadV2_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    PadUint8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+} // TEST_SUITE("Pad_CpuRefTests")
+
+TEST_SUITE("Pad_CpuAccTests")
+{
+
+TEST_CASE ("Pad2d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad2dTest(backends);
+}
+
+TEST_CASE ("Pad3d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad3dTest(backends);
+}
+
+TEST_CASE ("Pad4d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad4dTest(backends);
+}
+
+TEST_CASE ("Pad_Int8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    PadInt8Test(backends);
+}
+
+TEST_CASE ("Pad_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    PadUint8Test(backends);
+}
+
+TEST_CASE ("PadV22d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad2dTest(backends, tflite::BuiltinOperator_PADV2, -2.5);
+}
+
+TEST_CASE ("PadV23d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad3dTest(backends, tflite::BuiltinOperator_PADV2, 2.0);
+}
+
+TEST_CASE ("PadV24d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad4dTest(backends, tflite::BuiltinOperator_PADV2, -1.33);
+}
+
+TEST_CASE ("PadV2_Int8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    PadInt8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+TEST_CASE ("PadV2_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    PadUint8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+} // TEST_SUITE("Pad_CpuAccTests")
+
+TEST_SUITE("Pad_GpuAccTests")
+{
+
+TEST_CASE ("Pad2d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad2dTest(backends);
+}
+
+TEST_CASE ("Pad3d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad3dTest(backends);
+}
+
+TEST_CASE ("Pad4d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad4dTest(backends);
+}
+
+TEST_CASE ("Pad_Int8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    PadInt8Test(backends);
+}
+
+TEST_CASE ("Pad_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    PadUint8Test(backends);
+}
+
+TEST_CASE ("PadV22d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad2dTest(backends, tflite::BuiltinOperator_PADV2, -2.5);
+}
+
+TEST_CASE ("PadV23d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad3dTest(backends, tflite::BuiltinOperator_PADV2, 2.0);
+}
+
+TEST_CASE ("PadV24d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad4dTest(backends, tflite::BuiltinOperator_PADV2, -1.33);
+}
+
+TEST_CASE ("PadV2_Int8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    PadInt8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+TEST_CASE ("PadV2_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    PadUint8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+} // TEST_SUITE("Pad_GpuAccTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/PadTestHelper.hpp b/delegate/src/test/PadTestHelper.hpp
new file mode 100644
index 0000000000..7221dedb20
--- /dev/null
+++ b/delegate/src/test/PadTestHelper.hpp
@@ -0,0 +1,214 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+template <typename T>
+std::vector<char> CreatePadTfLiteModel(
+    tflite::BuiltinOperator padOperatorCode,
+    tflite::TensorType tensorType,
+    const std::vector<int32_t>& inputTensorShape,
+    const std::vector<int32_t>& paddingTensorShape,
+    const std::vector<int32_t>& outputTensorShape,
+    const std::vector<int32_t>& paddingDim,
+    const std::vector<T> paddingValue,
+    float quantScale = 1.0f,
+    int quantOffset = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    auto inputTensor = CreateTensor(flatBufferBuilder,
+                                    flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                            inputTensorShape.size()),
+                                    tensorType,
+                                    0,
+                                    flatBufferBuilder.CreateString("input"),
+                                    quantizationParameters);
+
+    auto paddingTensor = CreateTensor(flatBufferBuilder,
+                                      flatBufferBuilder.CreateVector<int32_t>(paddingTensorShape.data(),
+                                                                              paddingTensorShape.size()),
+                                      tflite::TensorType_INT32,
+                                      1,
+                                      flatBufferBuilder.CreateString("padding"));
+
+    auto outputTensor = CreateTensor(flatBufferBuilder,
+                                     flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                             outputTensorShape.size()),
+                                     tensorType,
+                                     2,
+                                     flatBufferBuilder.CreateString("output"),
+                                     quantizationParameters);
+
+    std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, paddingTensor, outputTensor };
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingDim.data()),
+                                                    sizeof(int32_t) * paddingDim.size())));
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+
+    std::vector<int32_t> operatorInputs;
+    std::vector<int32_t> subgraphInputs;
+
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_PadOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions;
+
+    if (padOperatorCode == tflite::BuiltinOperator_PAD)
+    {
+        operatorInputs = {{ 0, 1 }};
+        subgraphInputs = {{ 0, 1 }};
+        operatorBuiltinOptions = CreatePadOptions(flatBufferBuilder).Union();
+    }
+    else if (padOperatorCode == tflite::BuiltinOperator_PADV2)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingValue.data()),
+                                                        sizeof(T))));
+
+        const std::vector<int32_t> shape = { 1 };
+        auto padValueTensor = CreateTensor(flatBufferBuilder,
+                                           flatBufferBuilder.CreateVector<int32_t>(shape.data(),
+                                                                                   shape.size()),
+                                           tensorType,
+                                           3,
+                                           flatBufferBuilder.CreateString("paddingValue"),
+                                           quantizationParameters);
+
+        tensors.push_back(padValueTensor);
+
+        operatorInputs = {{ 0, 1, 3 }};
+        subgraphInputs = {{ 0, 1, 3 }};
+
+        operatorBuiltinOptionsType = BuiltinOptions_PadV2Options;
+        operatorBuiltinOptions = CreatePadV2Options(flatBufferBuilder).Union();
+    }
+
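+    // PAD takes two inputs (input, paddings) and implicitly pads with zero,
+    // while PADV2 takes a third input carrying the padding value; the branch
+    // above therefore appends an extra buffer and tensor at index 3.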
+    // create operator
+    const std::vector<int32_t> operatorOutputs{{ 2 }};
+    flatbuffers::Offset<Operator> padOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int32_t> subgraphOutputs{{ 2 }};
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&padOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Pad Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                        padOperatorCode);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void PadTest(tflite::BuiltinOperator padOperatorCode,
+             tflite::TensorType tensorType,
+             const std::vector<armnn::BackendId>& backends,
+             const std::vector<int32_t>& inputShape,
+             const std::vector<int32_t>& paddingShape,
+             std::vector<int32_t>& outputShape,
+             std::vector<T>& inputValues,
+             std::vector<int32_t>& paddingDim,
+             std::vector<T>& expectedOutputValues,
+             T paddingValue,
+             float quantScale = 1.0f,
+             int quantOffset = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreatePadTfLiteModel<T>(padOperatorCode,
+                                                            tensorType,
+                                                            inputShape,
+                                                            paddingShape,
+                                                            outputShape,
+                                                            paddingDim,
+                                                            {paddingValue},
+                                                            quantScale,
+                                                            quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+
+    // Build one interpreter that will run through the Arm NN delegate...
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // ...and a second, undelegated interpreter as the reference implementation.
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
+
+    // Run inference on both interpreters and compare the outputs
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, outputShape,
+                                        expectedOutputValues);
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/src/test/TestUtils.hpp b/delegate/src/test/TestUtils.hpp
index 284eaa74f5..b165920762 100644
--- a/delegate/src/test/TestUtils.hpp
+++ b/delegate/src/test/TestUtils.hpp
@@ -61,6 +61,9 @@ void CompareOutputData(std::unique_ptr<tflite::Interpreter>& tfLiteInterpreter,
     auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
     auto armnnDelegateOutputData =
        armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
+    CHECK(expectedOutputShape.size() == tfLiteDelegateOutputTensor->dims->size);
+    CHECK(expectedOutputShape.size() == armnnDelegateOutputTensor->dims->size);
+
     for (size_t i = 0; i < expectedOutputShape.size(); i++)
     {
         CHECK(expectedOutputShape[i] == armnnDelegateOutputTensor->dims->data[i]);
--
cgit v1.2.1
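Since this diff is limited to 'delegate/src/test', the delegate-side translation that these tests exercise is not shown in the patch. As orientation only, the sketch below shows one way a TfLite paddings tensor and the optional PadV2 padding value could be folded into an armnn::PadDescriptor before adding a pad layer to the network; the helper name ConvertPadParams and its raw-pointer interface are illustrative assumptions, not the actual ArmnnDelegate code.

    #include <armnn/Descriptors.hpp>

    #include <cstdint>

    // Hypothetical helper (not the real delegate code): fold a TfLite-style
    // paddings tensor of shape [rank, 2] (front/back pairs per dimension)
    // into an armnn::PadDescriptor.
    armnn::PadDescriptor ConvertPadParams(const int32_t* paddings,
                                          unsigned int rank,
                                          float padValue = 0.0f)
    {
        armnn::PadDescriptor descriptor;
        for (unsigned int dim = 0; dim < rank; ++dim)
        {
            descriptor.m_PadList.emplace_back(static_cast<unsigned int>(paddings[2 * dim]),
                                              static_cast<unsigned int>(paddings[2 * dim + 1]));
        }
        // PAD implicitly pads with 0.0f; PADV2 supplies padValue explicitly.
        descriptor.m_PadValue = padValue;
        return descriptor;
    }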