From 50c87d39173cb48fc216ccb585714b669b095611 Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat
Date: Mon, 9 Nov 2020 18:42:11 +0000
Subject: IVGCVSW-5387 TfLiteDelegate: Implement the Pooling operators

* Implement MaxPool2d operators
* Add QAsymmS8 to armnn delegate
* Unit tests

Signed-off-by: Narumol Prangnawarat
Change-Id: I1815ade6ccda3e9331bd3a68e164be0f6947e9df
---
 delegate/CMakeLists.txt                   |   4 +-
 delegate/src/DelegateUtils.hpp            |   9 +-
 delegate/src/Pooling.hpp                  | 110 ++++++++-
 delegate/src/test/Pooling2dTest.cpp       | 361 ++++++++++++++++++++++++++++++
 delegate/src/test/Pooling2dTestHelper.hpp | 212 ++++++++++++++++++
 5 files changed, 692 insertions(+), 4 deletions(-)
 create mode 100644 delegate/src/test/Pooling2dTest.cpp
 create mode 100644 delegate/src/test/Pooling2dTestHelper.hpp

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 0a1a3e43e4..e05a0baff4 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -95,6 +95,8 @@ list(APPEND armnnDelegate_unittest_sources
         src/test/ElementwiseBinaryTestHelper.hpp
         src/test/ElementwiseUnaryTest.cpp
         src/test/ElementwiseUnaryTestHelper.hpp
+        src/test/Pooling2dTest.cpp
+        src/test/Pooling2dTestHelper.hpp
         src/test/QuantizationTest.cpp
         src/test/QuantizationTestHelper.hpp)
 
@@ -159,4 +161,4 @@ export(
        FILE ${CMAKE_CURRENT_BINARY_DIR}/ArmnnDelegateTargets.cmake
        NAMESPACE ArmnnDelegate::)
 
-####################################################
\ No newline at end of file
+####################################################
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index f12aee9d2b..729a8b4e98 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -317,7 +317,14 @@ armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor)
             type = armnn::DataType::QAsymmU8;
             break;
         case kTfLiteInt8:
-            type = armnn::DataType::QSymmS8;
+            if (tfLiteTensor.params.zero_point == 0)
+            {
+                type = armnn::DataType::QSymmS8;
+            }
+            else
+            {
+                type = armnn::DataType::QAsymmS8;
+            }
             break;
         case kTfLiteInt16:
             type = armnn::DataType::QSymmS16;
diff --git a/delegate/src/Pooling.hpp b/delegate/src/Pooling.hpp
index ff3d668545..28e26f6504 100644
--- a/delegate/src/Pooling.hpp
+++ b/delegate/src/Pooling.hpp
@@ -13,13 +13,119 @@
 namespace armnnDelegate
 {
 
+void CalcPadding(uint32_t inputSize,
+                 uint32_t filterSize,
+                 uint32_t stride,
+                 uint32_t dilation,
+                 uint32_t& paddingFront,
+                 uint32_t& paddingBack,
+                 TfLitePadding padding)
+{
+    paddingFront = 0;
+    paddingBack = 0;
+    if (padding == kTfLitePaddingSame)
+    {
+        uint32_t outputSize = (inputSize + stride - 1) / stride;
+        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
+        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
+        if (temp > inputSize)
+        {
+            paddingFront = (temp - inputSize) / 2;
+            paddingBack = (temp - inputSize) - paddingFront;
+        }
+    }
+}
+
 TfLiteStatus VisitPoolingOperator(DelegateData& delegateData,
                                   TfLiteContext* tfLiteContext,
                                   TfLiteNode* tfLiteNode,
                                   int nodeIndex,
-                                  int32_t poolingOperatorCode)
+                                  int32_t tfLitePoolingOperatorCode)
 {
-    return kTfLiteError;
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (IsDynamicTensor(tfLiteInputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            tfLitePoolingOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (IsDynamicTensor(tfLiteOutputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+            tfLitePoolingOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    armnn::PoolingAlgorithm poolingAlgorithm;
+    switch(tfLitePoolingOperatorCode)
+    {
+        case kTfLiteBuiltinMaxPool2d:
+            poolingAlgorithm = armnn::PoolingAlgorithm::Max;
+            break;
+        default:
+            return kTfLiteError;
+    }
+
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = poolingAlgorithm;
+
+    auto* params = reinterpret_cast<TfLitePoolParams*>(tfLiteNode->builtin_data);
+    descriptor.m_PoolWidth = params->filter_width;
+    descriptor.m_PoolHeight = params->filter_height;
+    descriptor.m_StrideX = params->stride_width;
+    descriptor.m_StrideY = params->stride_height;
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
+    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];
+
+    CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
+                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
+    CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
+                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsPooling2dSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputTensorInfo,
+                                   outputTensorInfo,
+                                   descriptor);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor);
+    ARMNN_ASSERT(poolingLayer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+    Connect(poolingLayer, tfLiteNode, delegateData);
+
+    // Check activation
+    TfLiteFusedActivation activationType = params->activation;
+    return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
 }
 
 } // namespace armnnDelegate
diff --git a/delegate/src/test/Pooling2dTest.cpp b/delegate/src/test/Pooling2dTest.cpp
new file mode 100644
index 0000000000..3671b0b172
--- /dev/null
+++ b/delegate/src/test/Pooling2dTest.cpp
@@ -0,0 +1,361 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Pooling2dTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void MaxPool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 1, 2, 1 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
+    std::vector<float> expectedOutputValues = { 12.0f, 7.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_VALID,
+                         2,
+                         2,
+                         2,
+                         2);
+}
+
+void MaxPool2dInt8PaddingValidTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 1, 2, 1 };
+
+    std::vector<int8_t> inputValues = { -5, 8, -10, 7,
+                                        8, 12, -15, 2,
+                                        3, -4, -1, -11 };
+
+    std::vector<int8_t> expectedOutputValues = { 12, 7 };
+
+    Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+                          ::tflite::TensorType_INT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          ::tflite::Padding_VALID,
+                          2,
+                          2,
+                          2,
+                          2,
+                          tflite::ActivationFunctionType_NONE,
+                          2.5f,
+                          1);
+}
+
+void MaxPool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
+    std::vector<float> expectedOutputValues = { 12.0f, 7.0f, 3.0f, -1.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_SAME,
+                         2,
+                         2,
+                         2,
+                         2);
+}
+
+void MaxPool2dInt8PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<int8_t> inputValues = { -5, 8, -10, 7,
+                                        8, 12, -15, 2,
+                                        3, -4, -1, -11 };
+
+    std::vector<int8_t> expectedOutputValues = { 12, 7, 3, -1 };
+
+    Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+                          ::tflite::TensorType_INT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          ::tflite::Padding_SAME,
+                          2,
+                          2,
+                          2,
+                          2,
+                          tflite::ActivationFunctionType_NONE,
+                          2.5f,
+                          1);
+}
+
+void MaxPool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+    std::vector<float> inputValues = { -5.0f, -8.0f, -10.0f, 7.0f,
+                                       -8.0f, -12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
+    std::vector<float> expectedOutputValues = { 0.0f, 0.0f, 7.0f, 3.0f, 0.0f, 2.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_VALID,
+                         1,
+                         1,
+                         2,
+                         2,
+                         ::tflite::ActivationFunctionType_RELU);
+}
+
+void MaxPool2dInt8ReluTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+    std::vector<int8_t> inputValues = { -5, -8, -10, 7,
+                                        -8, -12, -15, 2,
+                                        3, -4, -1, -11 };
+
+    std::vector<int8_t> expectedOutputValues = { 1, 1, 7, 3, 1, 2 };
+
+    Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+                          ::tflite::TensorType_INT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          ::tflite::Padding_VALID,
+                          1,
+                          1,
+                          2,
+                          2,
+                          ::tflite::ActivationFunctionType_RELU,
+                          2.0f,
+                          1);
+}
+
+void MaxPool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<float> inputValues = { -5.0f, -8.0f, -10.0f, 7.0f,
+                                       -8.0f, -12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
+    std::vector<float> expectedOutputValues = { 0.0f, 0.0f, 3.0f, 0.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_SAME,
+                         2,
+                         2,
+                         1,
+                         1,
+                         ::tflite::ActivationFunctionType_RELU6);
+}
+
+void MaxPool2dInt8Relu6Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<int8_t> inputValues = { -5, -8, -10, 7,
+                                        -8, -12, -15, 2,
+                                        3, -4, -1, -11 };
+
+    std::vector<int8_t> expectedOutputValues = { 1, 1, 3, 1 };
+
+    Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+                          ::tflite::TensorType_INT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          ::tflite::Padding_SAME,
+                          2,
+                          2,
+                          1,
+                          1,
+                          ::tflite::ActivationFunctionType_RELU6,
+                          2.0f,
+                          1);
+}
+
+TEST_SUITE("Pooling2dTest")
+{
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingValid_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingValid_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingValid_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dInt8PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingValid_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dInt8PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingSame_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingSame_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingSame_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dInt8PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingSame_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dInt8PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_Relu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dInt8ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_Relu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dInt8ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu6_GpuAcc_Test")
("MaxPooling2d_FP32_Relu6_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + MaxPool2dFP32Relu6Test(backends); +} + +TEST_CASE ("MaxPooling2d_FP32_Relu6_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + MaxPool2dFP32Relu6Test(backends); +} + +TEST_CASE ("MaxPooling2d_Int8_Relu6_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + MaxPool2dInt8Relu6Test(backends); +} + +TEST_CASE ("MaxPooling2d_Int8_Relu6_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + MaxPool2dInt8Relu6Test(backends); +} + +} + +} // namespace armnnDelegate \ No newline at end of file diff --git a/delegate/src/test/Pooling2dTestHelper.hpp b/delegate/src/test/Pooling2dTestHelper.hpp new file mode 100644 index 0000000000..a344650814 --- /dev/null +++ b/delegate/src/test/Pooling2dTestHelper.hpp @@ -0,0 +1,212 @@ +// +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +#include + +namespace +{ + +std::vector CreatePooling2dTfLiteModel( + tflite::BuiltinOperator poolingOperatorCode, + tflite::TensorType tensorType, + const std::vector & inputTensorShape, + const std::vector & outputTensorShape, + tflite::Padding padding = tflite::Padding_SAME, + int32_t strideWidth = 0, + int32_t strideHeight = 0, + int32_t filterWidth = 0, + int32_t filterHeight = 0, + tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE, + float quantScale = 1.0f, + int quantOffset = 0) +{ + using namespace tflite; + flatbuffers::FlatBufferBuilder flatBufferBuilder; + + std::vector> buffers; + buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}))); + + auto quantizationParameters = + CreateQuantizationParameters(flatBufferBuilder, + 0, + 0, + flatBufferBuilder.CreateVector({ quantScale }), + flatBufferBuilder.CreateVector({ quantOffset })); + + std::array, 2> tensors; + tensors[0] = CreateTensor(flatBufferBuilder, + flatBufferBuilder.CreateVector(inputTensorShape.data(), + inputTensorShape.size()), + tensorType, + 0, + flatBufferBuilder.CreateString("input"), + quantizationParameters); + + tensors[1] = CreateTensor(flatBufferBuilder, + flatBufferBuilder.CreateVector(outputTensorShape.data(), + outputTensorShape.size()), + tensorType, + 0, + flatBufferBuilder.CreateString("output"), + quantizationParameters); + + // create operator + tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_Pool2DOptions; + flatbuffers::Offset operatorBuiltinOptions = CreatePool2DOptions(flatBufferBuilder, + padding, + strideWidth, + strideHeight, + filterWidth, + filterHeight, + fusedActivation).Union(); + + const std::vector operatorInputs{{0}}; + const std::vector operatorOutputs{{1}}; + flatbuffers::Offset poolingOperator = + CreateOperator(flatBufferBuilder, + 0, + flatBufferBuilder.CreateVector(operatorInputs.data(), operatorInputs.size()), + flatBufferBuilder.CreateVector(operatorOutputs.data(), operatorOutputs.size()), + operatorBuiltinOptionsType, + operatorBuiltinOptions); + + const std::vector subgraphInputs{{0}}; + const std::vector subgraphOutputs{{1}}; + flatbuffers::Offset subgraph = + CreateSubGraph(flatBufferBuilder, + flatBufferBuilder.CreateVector(tensors.data(), tensors.size()), + flatBufferBuilder.CreateVector(subgraphInputs.data(), subgraphInputs.size()), 
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&poolingOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Pooling2d Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, poolingOperatorCode);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void Pooling2dTest(tflite::BuiltinOperator poolingOperatorCode,
+                   tflite::TensorType tensorType,
+                   std::vector<armnn::BackendId>& backends,
+                   std::vector<int32_t>& inputShape,
+                   std::vector<int32_t>& outputShape,
+                   std::vector<T>& inputValues,
+                   std::vector<T>& expectedOutputValues,
+                   tflite::Padding padding = tflite::Padding_SAME,
+                   int32_t strideWidth = 0,
+                   int32_t strideHeight = 0,
+                   int32_t filterWidth = 0,
+                   int32_t filterHeight = 0,
+                   tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
+                   float quantScale = 1.0f,
+                   int quantOffset = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreatePooling2dTfLiteModel(poolingOperatorCode,
+                                                               tensorType,
+                                                               inputShape,
+                                                               outputShape,
+                                                               padding,
+                                                               strideWidth,
+                                                               strideHeight,
+                                                               filterWidth,
+                                                               filterHeight,
+                                                               fusedActivation,
+                                                               quantScale,
+                                                               quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
+    auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
+    auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
+
+    for (size_t i = 0; i < tfLiteDelegateOutputTensor->dims->size; i++)
+    {
+        CHECK(outputShape[i] == armnnDelegateOutputTensor->dims->data[i]);
+        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
+    }
+
+    for (size_t i = 0; i < expectedOutputValues.size(); i++)
+    {
+        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
+        CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
+        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
+    }
+}
+
+} // anonymous namespace
+
+
+
+
-- 
cgit v1.2.1
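
Appendix (not part of the patch): a minimal application-side sketch of enabling the Arm NN delegate that these tests exercise. The main() scaffolding and the model filename are hypothetical; the delegate calls (DelegateOptions, TfLiteArmnnDelegateCreate/Delete, ModifyGraphWithDelegate) mirror Pooling2dTestHelper.hpp above.

// enable_armnn_delegate.cpp -- illustrative usage sketch, assuming a model
// file "maxpool2d.tflite" containing a MAX_POOL_2D operator.
#include <armnn_delegate.hpp>

#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>

#include <memory>
#include <vector>

int main()
{
    // Load the hypothetical model and build a standard TfLite interpreter.
    auto model = tflite::FlatBufferModel::BuildFromFile("maxpool2d.tflite");
    if (!model) { return 1; }

    std::unique_ptr<tflite::Interpreter> interpreter;
    tflite::ops::builtin::BuiltinOpResolver resolver;
    if (tflite::InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk) { return 1; }

    // Prefer CpuAcc (Neon) and fall back to CpuRef -- the same backend
    // pattern the unit tests above use.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);

    // Nodes the delegate supports (e.g. the pooling operator implemented in
    // this patch) run through Arm NN; anything else stays on the default
    // TfLite kernels.
    if (interpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) != kTfLiteOk) { return 1; }
    if (interpreter->AllocateTensors() != kTfLiteOk) { return 1; }

    return interpreter->Invoke() == kTfLiteOk ? 0 : 1;
}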