From cbf79298f73310fe4ca5d760ded73575e4bf8fad Mon Sep 17 00:00:00 2001
From: Idriss Chaouch
Date: Fri, 8 Sep 2023 11:18:16 +0100
Subject: IVGCVSW-8037 Add BROADCAST_TO to tflite classic and opaque delegate.

Signed-off-by: Idriss Chaouch
Change-Id: Ibc145d0ea1ac9414b6a68b5b547bf2ea2852fd36
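Note: this change relies on the BroadcastTo layer added to the Arm NN core
(BroadcastToDescriptor, m_BroadcastToShape and INetwork::AddBroadcastToLayer,
all referenced by the diffs below). To exercise that layer outside the
delegate, a minimal sketch against the public Arm NN C++ API follows; the
shapes, data and the CpuRef backend choice are illustrative, not code from
this patch:

#include <armnn/ArmNN.hpp>

#include <vector>

int main()
{
    using namespace armnn;

    // Build a graph that broadcasts a [1, 4] input to a [3, 4] output.
    INetworkPtr network = INetwork::Create();

    BroadcastToDescriptor descriptor;
    descriptor.m_BroadcastToShape = TensorShape({ 3, 4 });

    IConnectableLayer* input  = network->AddInputLayer(0);
    IConnectableLayer* bcast  = network->AddBroadcastToLayer(descriptor, "BroadcastTo");
    IConnectableLayer* output = network->AddOutputLayer(0);

    TensorInfo inputInfo (TensorShape({ 1, 4 }), DataType::Float32);
    TensorInfo outputInfo(TensorShape({ 3, 4 }), DataType::Float32);
    inputInfo.SetConstant(); // ConstTensor binding requires a constant-flagged TensorInfo

    input->GetOutputSlot(0).Connect(bcast->GetInputSlot(0));
    bcast->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
    bcast->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Optimize for the reference backend and run.
    IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());
    IOptimizedNetworkPtr optNet = Optimize(*network, { Compute::CpuRef }, runtime->GetDeviceSpec());

    NetworkId networkId;
    runtime->LoadNetwork(networkId, std::move(optNet));

    std::vector<float> inputData  = { 0, 1, 2, 3 };
    std::vector<float> outputData(12);

    InputTensors  inputTensors  { { 0, ConstTensor(inputInfo, inputData.data()) } };
    OutputTensors outputTensors { { 0, Tensor(outputInfo, outputData.data()) } };
    runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);
    // outputData now holds three copies of { 0, 1, 2, 3 }.
    return 0;
}

The delegate code below builds the same descriptor, except that the target
shape is read from the operator's second (shape) input tensor.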
---
 delegate/CMakeLists.txt                 |   2 +
 delegate/classic/CMakeLists.txt         |   1 +
 delegate/classic/src/BroadcastTo.hpp    | 122 +++++++++++++++++++++++
 delegate/classic/src/armnn_delegate.cpp |   7 ++
 delegate/opaque/CMakeLists.txt          |   1 +
 delegate/opaque/src/BroadcastTo.hpp     | 141 +++++++++++++++++++++++++++
 delegate/opaque/src/armnn_delegate.cpp  |   7 ++
 delegate/test/BroadcastToTest.cpp       |  80 +++++++++++++++
 delegate/test/BroadcastToTestHelper.hpp | 167 ++++++++++++++++++++++++++++++++
 9 files changed, 528 insertions(+)
 create mode 100644 delegate/classic/src/BroadcastTo.hpp
 create mode 100644 delegate/opaque/src/BroadcastTo.hpp
 create mode 100644 delegate/test/BroadcastToTest.cpp
 create mode 100644 delegate/test/BroadcastToTestHelper.hpp

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index c1bf73a6ab..d92611f84b 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -134,6 +134,8 @@ if(BUILD_UNIT_TESTS)
         test/BatchMatMulTestHelper.hpp
         test/BatchSpaceTest.cpp
         test/BatchSpaceTestHelper.hpp
+        test/BroadcastToTest.cpp
+        test/BroadcastToTestHelper.hpp
         test/CastTest.cpp
         test/CastTestHelper.hpp
         test/ComparisonTest.cpp
diff --git a/delegate/classic/CMakeLists.txt b/delegate/classic/CMakeLists.txt
index 7807153359..dfd0cf985d 100644
--- a/delegate/classic/CMakeLists.txt
+++ b/delegate/classic/CMakeLists.txt
@@ -13,6 +13,7 @@ list(APPEND armnnClassicDelegateObject_sources
         src/ArgMinMax.hpp
         src/BatchMatMul.hpp
         src/BatchSpace.hpp
+        src/BroadcastTo.hpp
         src/ClassicDelegateUtils.hpp
         src/Comparison.hpp
         src/Convolution.hpp
diff --git a/delegate/classic/src/BroadcastTo.hpp b/delegate/classic/src/BroadcastTo.hpp
new file mode 100644
index 0000000000..92aed79982
--- /dev/null
+++ b/delegate/classic/src/BroadcastTo.hpp
@@ -0,0 +1,122 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <ClassicDelegateUtils.hpp>
+
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
+#include <tensorflow/lite/minimal_logging.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+namespace armnnDelegate
+{
+    TfLiteStatus ValidateBroadcastToOperator(DelegateData& delegateData,
+                                             TfLiteContext* tfLiteContext,
+                                             const armnn::TensorInfo& inputInfo,
+                                             const armnn::TensorInfo& outputInfo,
+                                             const armnn::BroadcastToDescriptor& descriptor)
+    {
+        bool isSupported = false;
+        FORWARD_LAYER_SUPPORT_FUNC("BROADCAST_TO",
+                                   tfLiteContext,
+                                   IsBroadcastToSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   armnn::BackendId(),
+                                   inputInfo,
+                                   outputInfo,
+                                   descriptor);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    TfLiteStatus VisitBroadcastToOperator(DelegateData& delegateData,
+                                          TfLiteContext* tfLiteContext,
+                                          TfLiteNode* tfLiteNode,
+                                          int nodeIndex,
+                                          int32_t broadcastToOperatorCode)
+    {
+        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+        TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+        const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+        // The input contains the data that should be broadcast
+        const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+        if (IsDynamicTensor(tfLiteInputTensor))
+        {
+            TF_LITE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+                    broadcastToOperatorCode, nodeIndex);
+            return kTfLiteError;
+        }
+
+        // The shape tensor contains the new shape to be applied on the input
+        const TfLiteTensor& tfLiteShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+        if (IsDynamicTensor(tfLiteShapeTensor))
+        {
+            TF_LITE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+                    broadcastToOperatorCode, nodeIndex);
+            return kTfLiteError;
+        }
+
+        // The output tensor
+        const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+        if (IsDynamicTensor(tfLiteOutputTensor))
+        {
+            TF_LITE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+                    broadcastToOperatorCode, nodeIndex);
+            return kTfLiteError;
+        }
+
+        const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+        const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+        auto* shapeData = tflite::GetTensorData<int32_t>(&tfLiteShapeTensor);
+        auto shapeTensorNum = tfLiteShapeTensor.dims->data[0];
+
+        armnn::BroadcastToDescriptor broadcastToDescriptor;
+        broadcastToDescriptor.m_BroadcastToShape = armnn::TensorShape(shapeTensorNum,
+                                                                      shapeData);
+
+        // No network pointer indicates that only support for this operator should be checked
+        if (!delegateData.m_Network)
+        {
+            return ValidateBroadcastToOperator(delegateData,
+                                               tfLiteContext,
+                                               inputTensorInfo,
+                                               outputTensorInfo,
+                                               broadcastToDescriptor);
+        }
+
+        auto layerName = GetLayerName(armnn::LayerType::BroadcastTo, nodeIndex);
+        armnn::IConnectableLayer* layer = delegateData.m_Network->AddBroadcastToLayer(broadcastToDescriptor,
+                                                                                      layerName.c_str());
+
+        if (layer == nullptr)
+        {
+            return kTfLiteError;
+        }
+
+        layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+        if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
+        {
+            return kTfLiteError;
+        }
+
+        return Connect(layer, tfLiteNode, delegateData);
+    }
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/classic/src/armnn_delegate.cpp b/delegate/classic/src/armnn_delegate.cpp
index de2aa0c632..c428d46d87 100644
--- a/delegate/classic/src/armnn_delegate.cpp
+++ b/delegate/classic/src/armnn_delegate.cpp
@@ -11,6 +11,7 @@
 #include "ArgMinMax.hpp"
 #include "BatchMatMul.hpp"
 #include "BatchSpace.hpp"
+#include "BroadcastTo.hpp"
 #include "Comparison.hpp"
 #include "Convolution.hpp"
 #include "Control.hpp"
@@ -603,6 +604,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                                   tfLiteNode,
                                                   nodeIndex,
                                                   kTfLiteBuiltinBatchToSpaceNd);
+        case kTfLiteBuiltinBroadcastTo:
+            return VisitBroadcastToOperator(delegateData,
+                                            tfLiteContext,
+                                            tfLiteNode,
+                                            nodeIndex,
+                                            kTfLiteBuiltinBroadcastTo);
         case kTfLiteBuiltinCast:
             return VisitCastOperator(delegateData,
                                      tfLiteContext,
diff --git a/delegate/opaque/CMakeLists.txt b/delegate/opaque/CMakeLists.txt
index c05bccf8c9..365e0166ba 100644
--- a/delegate/opaque/CMakeLists.txt
+++ b/delegate/opaque/CMakeLists.txt
@@ -13,6 +13,7 @@ list(APPEND armnnOpaqueDelegateObject_sources
         src/armnn_external_delegate.cpp
         src/BatchMatMul.hpp
         src/BatchSpace.hpp
+        src/BroadcastTo.hpp
         src/Comparison.hpp
         src/Control.hpp
         src/Convolution.hpp
diff --git a/delegate/opaque/src/BroadcastTo.hpp b/delegate/opaque/src/BroadcastTo.hpp
new file mode 100644
index 0000000000..379587546f
--- /dev/null
+++ b/delegate/opaque/src/BroadcastTo.hpp
@@ -0,0 +1,141 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+    TfLiteStatus ValidateBroadcastToOperator(DelegateData& delegateData,
+                                             TfLiteOpaqueContext* tfLiteContext,
+                                             const armnn::TensorInfo& inputInfo,
+                                             const armnn::TensorInfo& outputInfo,
+                                             const armnn::BroadcastToDescriptor& descriptor)
+    {
+        bool isSupported = false;
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("BROADCAST_TO",
+                                          tfLiteContext,
+                                          IsBroadcastToSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          armnn::BackendId(),
+                                          inputInfo,
+                                          outputInfo,
+                                          descriptor);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    TfLiteStatus VisitBroadcastToOperator(DelegateData& delegateData,
+                                          TfLiteOpaqueContext* tfLiteContext,
+                                          TfLiteOpaqueNode* tfLiteNode,
+                                          int nodeIndex,
+                                          int32_t broadcastToOperatorCode)
+    {
+        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+        TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+        // Gather input tensors
+        auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+        const int* inputTensors;
+        if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+        {
+            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                    nodeIndex);
+            return kTfLiteError;
+        }
+
+        // Gather output tensors
+        int numOutputs = 0;
+        const int* outputTensors;
+        if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors,
+                                    &numOutputs) != kTfLiteOk)
+        {
+            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                    nodeIndex);
+            return kTfLiteError;
+        }
+
+        // The input contains the data
+        const TfLiteOpaqueTensor* tfLiteInputTensor =
+                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+        if (IsDynamicTensor(tfLiteInputTensor))
+        {
+            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+                    broadcastToOperatorCode, nodeIndex);
+            return kTfLiteError;
+        }
+
+        // The shape tensor
+        const TfLiteOpaqueTensor* tfLiteShapeTensor =
+                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+        if (IsDynamicTensor(tfLiteShapeTensor))
+        {
+            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+                    broadcastToOperatorCode, nodeIndex);
+            return kTfLiteError;
+        }
+
+        // The output tensor
+        const TfLiteOpaqueTensor* tfLiteOutputTensor =
+                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+        if (IsDynamicTensor(tfLiteOutputTensor))
+        {
+            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnOpaqueDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+                    broadcastToOperatorCode, nodeIndex);
+            return kTfLiteError;
+        }
+
+        const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+        const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor,
+                                                                                       true);
+
+        auto* shapeData = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteShapeTensor));
+        int32_t shapeTensorNum = TfLiteOpaqueTensorDim(tfLiteShapeTensor, 0);
+
+        armnn::BroadcastToDescriptor broadcastToDescriptor;
+        broadcastToDescriptor.m_BroadcastToShape = armnn::TensorShape(shapeTensorNum,
+                                                                      shapeData);
+
+        // No network pointer indicates that only support for this operator should be checked
+        if (!delegateData.m_Network)
+        {
+            return ValidateBroadcastToOperator(delegateData,
+                                               tfLiteContext,
+                                               inputTensorInfo,
+                                               outputTensorInfo,
+                                               broadcastToDescriptor);
+        }
+
+        std::string layerName("BroadcastTo");
+        armnn::IConnectableLayer* layer = delegateData.m_Network->AddBroadcastToLayer(broadcastToDescriptor,
+                                                                                      layerName.c_str());
+
+        if (layer == nullptr)
+        {
+            return kTfLiteError;
+        }
+
+        layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+        if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
+        {
+            return kTfLiteError;
+        }
+
+        return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+    }
+
+} // namespace armnnOpaqueDelegate
\ No newline at end of file
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index bad1abaa59..08b1504efb 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -10,6 +10,7 @@
 #include "ArgMinMax.hpp"
 #include "BatchMatMul.hpp"
 #include "BatchSpace.hpp"
+#include "BroadcastTo.hpp"
 #include "Comparison.hpp"
 #include "Convolution.hpp"
 #include "Control.hpp"
@@ -654,6 +655,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                                tfLiteNode,
                                                nodeIndex,
                                                kTfLiteBuiltinBatchMatmul);
+        case kTfLiteBuiltinBroadcastTo:
+            return VisitBroadcastToOperator(delegateData,
+                                            tfLiteContext,
+                                            tfLiteNode,
+                                            nodeIndex,
+                                            kTfLiteBuiltinBroadcastTo);
         case kTfLiteBuiltinBatchToSpaceNd:
             return VisitBatchToSpaceNdOperator(delegateData,
                                                tfLiteContext,
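The new tests below broadcast a [1, 4] input to a [3, 4] output. For
reference, BROADCAST_TO follows NumPy-style broadcasting: a dimension of
size 1 is repeated until it matches the target shape. A self-contained
sketch of the row case the tests use (illustrative only, not part of the
patch):

#include <cassert>
#include <cstdint>
#include <vector>

// Broadcast a [1, cols] row vector to [rows, cols] by repeating the row,
// mirroring what BROADCAST_TO computes for the shapes used in the tests.
std::vector<int32_t> BroadcastRow(const std::vector<int32_t>& input, int rows, int cols)
{
    assert(static_cast<int>(input.size()) == cols);
    std::vector<int32_t> output;
    output.reserve(static_cast<size_t>(rows) * static_cast<size_t>(cols));
    for (int r = 0; r < rows; ++r)
    {
        output.insert(output.end(), input.begin(), input.end());
    }
    return output;
}

// BroadcastRow({0, 1, 2, 3}, 3, 4) yields {0,1,2,3, 0,1,2,3, 0,1,2,3},
// matching expectedOutputValues in BroadcastToTest.cpp.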
diff --git a/delegate/test/BroadcastToTest.cpp b/delegate/test/BroadcastToTest.cpp
new file mode 100644
index 0000000000..f4692cfb07
--- /dev/null
+++ b/delegate/test/BroadcastToTest.cpp
@@ -0,0 +1,80 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "BroadcastToTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+template <typename T>
+void BroadcastToTest(std::vector<armnn::BackendId>& backends, tflite::TensorType inputTensorType)
+{
+    // Set input data
+    std::vector<T> inputValues = {
+        0, 1, 2, 3
+    };
+    // Set output data
+    std::vector<T> expectedOutputValues = {
+        0, 1, 2, 3,
+        0, 1, 2, 3,
+        0, 1, 2, 3
+    };
+
+    // The shape data
+    const std::vector<int32_t> shapeData = {3, 4};
+
+    // Set shapes
+    const std::vector<int32_t> inputShape = {1, 4};
+    const std::vector<int32_t> shapeShape = {2};
+    const std::vector<int32_t> expectedOutputShape = {3, 4};
+
+    BroadcastToTestImpl<T>(inputTensorType,
+                           tflite::BuiltinOperator_BROADCAST_TO,
+                           backends,
+                           inputValues,
+                           inputShape,
+                           shapeShape,
+                           shapeData,
+                           expectedOutputValues,
+                           expectedOutputShape);
+}
+
+TEST_SUITE("BroadcastToTests_CpuRefTests")
+{
+
+    TEST_CASE ("BroadcastTo_int_CpuRef_Test")
+    {
+        std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+        BroadcastToTest<int32_t>(backends, ::tflite::TensorType::TensorType_INT32);
+    }
+
+    TEST_CASE ("BroadcastTo_Float32_CpuRef_Test")
+    {
+        std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+        BroadcastToTest<float>(backends, ::tflite::TensorType::TensorType_FLOAT32);
+    }
+
+    TEST_CASE ("BroadcastTo_Uint8_t_CpuRef_Test")
+    {
+        std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+        BroadcastToTest<uint8_t>(backends, ::tflite::TensorType::TensorType_UINT8);
+    }
+
+    TEST_CASE ("BroadcastTo_Int8_t_CpuRef_Test")
+    {
+        std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+        BroadcastToTest<int8_t>(backends, ::tflite::TensorType::TensorType_INT8);
+    }
+
+} // TEST_SUITE("BroadcastToTests_CpuRefTests")
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/BroadcastToTestHelper.hpp b/delegate/test/BroadcastToTestHelper.hpp
new file mode 100644
index 0000000000..630fe3aaf1
--- /dev/null
+++ b/delegate/test/BroadcastToTestHelper.hpp
@@ -0,0 +1,167 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/version.h>
+
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+    std::vector<char> CreateBroadcastToTfLiteModel(tflite::BuiltinOperator operatorCode,
+                                                   tflite::TensorType inputTensorType,
+                                                   const std::vector<int32_t>& inputTensorShape,
+                                                   const std::vector<int32_t>& shapeTensorShape,
+                                                   const std::vector<int32_t>& shapeTensorData,
+                                                   const std::vector<int32_t>& outputTensorShape)
+    {
+        using namespace tflite;
+        flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+        std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
+        buffers.push_back(CreateBuffer(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector(
+                                               reinterpret_cast<const uint8_t*>(shapeTensorData.data()),
+                                               sizeof(int32_t) * shapeTensorData.size())));
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+        float   qScale  = 1.0f;
+        int32_t qOffset = 0;
+
+        auto quantizationParameters =
+                CreateQuantizationParameters(flatBufferBuilder,
+                                             0,
+                                             0,
+                                             flatBufferBuilder.CreateVector<float>({ qScale }),
+                                             flatBufferBuilder.CreateVector<int64_t>({ qOffset }));
+
+        std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+        tensors[0] = CreateTensor(flatBufferBuilder,
+                                  flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                          inputTensorShape.size()),
+                                  inputTensorType,
+                                  1,
+                                  flatBufferBuilder.CreateString("input_tensor"),
+                                  quantizationParameters);
+
+        tensors[1] = CreateTensor(flatBufferBuilder,
+                                  flatBufferBuilder.CreateVector<int32_t>(shapeTensorShape.data(),
+                                                                          shapeTensorShape.size()),
+                                  TensorType_INT32,
+                                  2,
+                                  flatBufferBuilder.CreateString("shape_input_tensor"),
+                                  quantizationParameters);
+
+        tensors[2] = CreateTensor(flatBufferBuilder,
+                                  flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                          outputTensorShape.size()),
+                                  inputTensorType,
+                                  3,
+                                  flatBufferBuilder.CreateString("output_tensor"),
+                                  quantizationParameters);
+
+        // Create Operator
+        tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_BroadcastToOptions;
+        flatbuffers::Offset<void> operatorBuiltinOption = 0;
+
+        const std::vector<int32_t> operatorInputs {0, 1};
+        const std::vector<int32_t> operatorOutputs {2};
+
+        flatbuffers::Offset<Operator> broadcastOperator =
+                CreateOperator(flatBufferBuilder,
+                               0,
+                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                               operatorBuiltinOptionsType,
+                               operatorBuiltinOption);
+
+        const std::vector<int32_t> subgraphInputs{0, 1};
+        const std::vector<int32_t> subgraphOutputs{2};
+        flatbuffers::Offset<SubGraph> subgraph =
+                CreateSubGraph(flatBufferBuilder,
+                               flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                               flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                               flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                               flatBufferBuilder.CreateVector(&broadcastOperator, 1));
+
+        flatbuffers::Offset<flatbuffers::String> modelDescription =
+                flatBufferBuilder.CreateString("ArmnnDelegate: BroadcastTo Operator Model");
+        flatbuffers::Offset<OperatorCode> opCode = CreateOperatorCode(flatBufferBuilder, 0,
+                                                                      0, 2,
+                                                                      tflite::BuiltinOperator_BROADCAST_TO);
+
+        flatbuffers::Offset<Model> flatbufferModel =
+                CreateModel(flatBufferBuilder,
+                            TFLITE_SCHEMA_VERSION,
+                            flatBufferBuilder.CreateVector(&opCode, 1),
+                            flatBufferBuilder.CreateVector(&subgraph, 1),
+                            modelDescription,
+                            flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+        flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
+
+        return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                                 flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+    }
+
+    template<typename T>
+    void BroadcastToTestImpl(tflite::TensorType inputTensorType,
+                             tflite::BuiltinOperator operatorCode,
+                             std::vector<armnn::BackendId>& backends,
+                             std::vector<T>& inputValues,
+                             std::vector<int32_t> inputShape,
+                             std::vector<int32_t> shapeShapes,
+                             std::vector<int32_t> shapeData,
+                             std::vector<T>& expectedOutputValues,
+                             std::vector<int32_t> expectedOutputShape)
+    {
+        using namespace delegateTestInterpreter;
+
+        std::vector<char> modelBuffer = CreateBroadcastToTfLiteModel(operatorCode,
+                                                                     inputTensorType,
+                                                                     inputShape,
+                                                                     shapeShapes,
+                                                                     shapeData,
+                                                                     expectedOutputShape);
+
+        // Setup interpreter with just TFLite Runtime.
+        auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+        CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+        CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+        CHECK(tfLiteInterpreter.FillInputTensor(shapeData, 1) == kTfLiteOk);
+        CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+        std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+        std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
+
+        // Setup interpreter with Arm NN Delegate applied.
+        auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+        CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+        CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+        CHECK(armnnInterpreter.FillInputTensor(shapeData, 1) == kTfLiteOk);
+        CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+        std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+        std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
+
+        armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+        tfLiteInterpreter.Cleanup();
+        armnnInterpreter.Cleanup();
+    }
+
+} // anonymous namespace
\ No newline at end of file
-- 
cgit v1.2.1
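For completeness, this is roughly how an application would route a model
containing BROADCAST_TO through the new code path once the patch lands. The
sketch follows the documented classic-delegate registration flow
(armnnDelegate::DelegateOptions, TfLiteArmnnDelegateCreate and
TfLiteArmnnDelegateDelete); the model path is a placeholder and error
handling is omitted:

#include <armnn_delegate.hpp>

#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>

#include <memory>

int main()
{
    // Load a model that contains a BROADCAST_TO node ("model.tflite" is a placeholder).
    auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
    std::unique_ptr<tflite::Interpreter> interpreter;
    tflite::ops::builtin::BuiltinOpResolver resolver;
    tflite::InterpreterBuilder(*model, resolver)(&interpreter);

    // Create the Arm NN classic delegate for CpuRef and hand the graph over;
    // BROADCAST_TO nodes are now claimed via VisitBroadcastToOperator.
    armnnDelegate::DelegateOptions delegateOptions(armnn::Compute::CpuRef);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
            armnnDelegateHandle(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                                armnnDelegate::TfLiteArmnnDelegateDelete);
    interpreter->ModifyGraphWithDelegate(armnnDelegateHandle.get());

    interpreter->AllocateTensors();
    interpreter->Invoke();
    return 0;
}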