path: root/delegate/src/test
author     Matthew Sloyan <matthew.sloyan@arm.com>    2020-11-26 10:54:22 +0000
committer  Matthew Sloyan <matthew.sloyan@arm.com>    2020-12-07 09:41:42 +0000
commit     c8eb955a2c9f0b432fe932e2df8445f242080e31 (patch)
tree       cb3a74ae4d3a2a558f0589a45a6d1ea6d58e02c3 /delegate/src/test
parent     97451b4429b717f6ff19c10716d1d82a2ff6f155 (diff)
download   armnn-c8eb955a2c9f0b432fe932e2df8445f242080e31.tar.gz
IVGCVSW-5381 TfLiteDelegate: Implement the Logical operators
* Implemented Logical AND, NOT and OR operators.
* NOT uses existing ElementwiseUnary VisitLayer function & tests.
* AND/OR uses new LogicalBinary VisitLayer function & tests.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I5e7f1e78b30c36ac7f14c70a712b54f98d664b83
Diffstat (limited to 'delegate/src/test')
-rw-r--r--   delegate/src/test/ElementwiseUnaryTestHelper.hpp    77
-rw-r--r--   delegate/src/test/LogicalTest.cpp                   226
-rw-r--r--   delegate/src/test/LogicalTestHelper.hpp             198
3 files changed, 488 insertions(+), 13 deletions(-)
diff --git a/delegate/src/test/ElementwiseUnaryTestHelper.hpp b/delegate/src/test/ElementwiseUnaryTestHelper.hpp
index 2683339eb5..dcc7074753 100644
--- a/delegate/src/test/ElementwiseUnaryTestHelper.hpp
+++ b/delegate/src/test/ElementwiseUnaryTestHelper.hpp
@@ -110,25 +110,76 @@ void ElementwiseUnaryFP32Test(tflite::BuiltinOperator unaryOperatorCode,
CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
// Set input data
- auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
- auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- tfLiteDelageInputData[i] = inputValues[i];
- }
-
- auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
- auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
- for (unsigned int i = 0; i < inputValues.size(); ++i)
- {
- armnnDelegateInputData[i] = inputValues[i];
- }
+ armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
+ armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
+
// Run EnqueueWorkload
CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
// Compare output data
armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, inputShape, expectedOutputValues);
+
+ armnnDelegateInterpreter.reset(nullptr);
+ tfLiteInterpreter.reset(nullptr);
+}
+
+void ElementwiseUnaryBoolTest(tflite::BuiltinOperator unaryOperatorCode,
+ std::vector<armnn::BackendId>& backends,
+ std::vector<int32_t>& inputShape,
+ std::vector<bool>& inputValues,
+ std::vector<bool>& expectedOutputValues)
+{
+ using namespace tflite;
+ std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(unaryOperatorCode,
+ ::tflite::TensorType_BOOL,
+ inputShape);
+
+ const Model* tfLiteModel = GetModel(modelBuffer.data());
+ // Create TfLite Interpreters
+ std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&armnnDelegateInterpreter) == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter != nullptr);
+ CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+ std::unique_ptr<Interpreter> tfLiteInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&tfLiteInterpreter) == kTfLiteOk);
+ CHECK(tfLiteInterpreter != nullptr);
+ CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+ // Create the ArmNN Delegate
+ armnnDelegate::DelegateOptions delegateOptions(backends);
+ std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+ theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+ armnnDelegate::TfLiteArmnnDelegateDelete);
+ CHECK(theArmnnDelegate != nullptr);
+
+ // Modify armnnDelegateInterpreter to use armnnDelegate
+ CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+ // Set input data
+ armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
+ armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
+
+ // Run EnqueueWorkload
+ CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+ // Compare output data. Boolean outputs are compared by calling CompareData directly,
+ // because std::vector<bool> stores its elements as a packed bit representation.
+ auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+ auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<bool>(tfLiteDelegateOutputId);
+ auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+ auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<bool>(armnnDelegateOutputId);
+
+ armnnDelegate::CompareData(expectedOutputValues, armnnDelegateOutputData, expectedOutputValues.size());
+ armnnDelegate::CompareData(expectedOutputValues, tfLiteDelegateOutputData, expectedOutputValues.size());
+ armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+
+ armnnDelegateInterpreter.reset(nullptr);
+ tfLiteInterpreter.reset(nullptr);
}
} // anonymous namespace
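
The Boolean path above compares outputs element by element through CompareData because std::vector<bool> is a packed bit container: it exposes no contiguous bool* buffer that could be compared in bulk against the pointer returned by typed_tensor<bool>(). A minimal standalone sketch of that constraint follows; it is independent of the patch and uses only the standard library, with a plain bool array merely standing in for the interpreter's output tensor.

#include <cassert>
#include <cstddef>
#include <type_traits>
#include <vector>

int main()
{
    // Expected values held the way the test helpers hold them.
    std::vector<bool> expectedOutputValues { false, true, true, true };

    // A plain bool buffer standing in for typed_tensor<bool>(outputId).
    bool outputData[] = { false, true, true, true };

    // std::vector<bool> is specialised to a bit representation: operator[]
    // returns a proxy object rather than a bool&, so there is no raw buffer
    // to compare wholesale; values are read back one element at a time.
    static_assert(!std::is_same<decltype(expectedOutputValues[0]), bool&>::value,
                  "vector<bool> does not hand out bool references");

    for (std::size_t i = 0; i < expectedOutputValues.size(); ++i)
    {
        assert(bool(expectedOutputValues[i]) == outputData[i]);
    }
    return 0;
}
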
diff --git a/delegate/src/test/LogicalTest.cpp b/delegate/src/test/LogicalTest.cpp
new file mode 100644
index 0000000000..9fa2d3dde0
--- /dev/null
+++ b/delegate/src/test/LogicalTest.cpp
@@ -0,0 +1,226 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ElementwiseUnaryTestHelper.hpp"
+#include "LogicalTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void LogicalBinaryAndBoolTest(std::vector<armnn::BackendId>& backends)
+{
+ std::vector<int32_t> input0Shape { 1, 2, 2 };
+ std::vector<int32_t> input1Shape { 1, 2, 2 };
+ std::vector<int32_t> expectedOutputShape { 1, 2, 2 };
+
+ // Set input and output values
+ std::vector<bool> input0Values { 0, 0, 1, 1 };
+ std::vector<bool> input1Values { 0, 1, 0, 1 };
+ std::vector<bool> expectedOutputValues { 0, 0, 0, 1 };
+
+ LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_AND,
+ ::tflite::TensorType_BOOL,
+ backends,
+ input0Shape,
+ input1Shape,
+ expectedOutputShape,
+ input0Values,
+ input1Values,
+ expectedOutputValues);
+}
+
+void LogicalBinaryAndBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+ std::vector<int32_t> input0Shape { 1, 2, 2 };
+ std::vector<int32_t> input1Shape { 1, 1, 1 };
+ std::vector<int32_t> expectedOutputShape { 1, 2, 2 };
+
+ std::vector<bool> input0Values { 0, 1, 0, 1 };
+ std::vector<bool> input1Values { 1 };
+ std::vector<bool> expectedOutputValues { 0, 1, 0, 1 };
+
+ LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_AND,
+ ::tflite::TensorType_BOOL,
+ backends,
+ input0Shape,
+ input1Shape,
+ expectedOutputShape,
+ input0Values,
+ input1Values,
+ expectedOutputValues);
+}
+
+void LogicalBinaryOrBoolTest(std::vector<armnn::BackendId>& backends)
+{
+ std::vector<int32_t> input0Shape { 1, 2, 2 };
+ std::vector<int32_t> input1Shape { 1, 2, 2 };
+ std::vector<int32_t> expectedOutputShape { 1, 2, 2 };
+
+ std::vector<bool> input0Values { 0, 0, 1, 1 };
+ std::vector<bool> input1Values { 0, 1, 0, 1 };
+ std::vector<bool> expectedOutputValues { 0, 1, 1, 1 };
+
+ LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_OR,
+ ::tflite::TensorType_BOOL,
+ backends,
+ input0Shape,
+ input1Shape,
+ expectedOutputShape,
+ input0Values,
+ input1Values,
+ expectedOutputValues);
+}
+
+void LogicalBinaryOrBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+ std::vector<int32_t> input0Shape { 1, 2, 2 };
+ std::vector<int32_t> input1Shape { 1, 1, 1 };
+ std::vector<int32_t> expectedOutputShape { 1, 2, 2 };
+
+ std::vector<bool> input0Values { 0, 1, 0, 1 };
+ std::vector<bool> input1Values { 1 };
+ std::vector<bool> expectedOutputValues { 1, 1, 1, 1 };
+
+ LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_OR,
+ ::tflite::TensorType_BOOL,
+ backends,
+ input0Shape,
+ input1Shape,
+ expectedOutputShape,
+ input0Values,
+ input1Values,
+ expectedOutputValues);
+}
+
+// The LogicalNot operator uses the existing ElementwiseUnary layer and descriptor, but it is still classed as a logical operator.
+void LogicalNotBoolTest(std::vector<armnn::BackendId>& backends)
+{
+ std::vector<int32_t> inputShape { 1, 2, 2 };
+
+ std::vector<bool> inputValues { 0, 1, 0, 1 };
+ std::vector<bool> expectedOutputValues { 1, 0, 1, 0 };
+
+ ElementwiseUnaryBoolTest(tflite::BuiltinOperator_LOGICAL_NOT,
+ backends,
+ inputShape,
+ inputValues,
+ expectedOutputValues);
+}
+
+TEST_SUITE("LogicalBinaryTests_GpuAccTests")
+{
+
+TEST_CASE ("LogicalBinary_AND_Bool_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ LogicalBinaryAndBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_AND_Broadcast_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ LogicalBinaryAndBroadcastTest(backends);
+}
+
+TEST_CASE ("Logical_NOT_Bool_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ LogicalNotBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_OR_Bool_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ LogicalBinaryOrBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_OR_Broadcast_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ LogicalBinaryOrBroadcastTest(backends);
+}
+
+}
+
+
+TEST_SUITE("LogicalBinaryTests_CpuAccTests")
+{
+
+TEST_CASE ("LogicalBinary_AND_Bool_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ LogicalBinaryAndBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_AND_Broadcast_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ LogicalBinaryAndBroadcastTest(backends);
+}
+
+TEST_CASE ("Logical_NOT_Bool_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ LogicalNotBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_OR_Bool_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ LogicalBinaryOrBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_OR_Broadcast_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ LogicalBinaryOrBroadcastTest(backends);
+}
+
+}
+
+
+TEST_SUITE("LogicalBinaryTests_CpuRefTests")
+{
+
+TEST_CASE ("LogicalBinary_AND_Bool_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ LogicalBinaryAndBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_AND_Broadcast_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ LogicalBinaryAndBroadcastTest(backends);
+}
+
+TEST_CASE ("Logical_NOT_Bool_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ LogicalNotBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_OR_Bool_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ LogicalBinaryOrBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_OR_Broadcast_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ LogicalBinaryOrBroadcastTest(backends);
+}
+
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/LogicalTestHelper.hpp b/delegate/src/test/LogicalTestHelper.hpp
new file mode 100644
index 0000000000..d08a1af388
--- /dev/null
+++ b/delegate/src/test/LogicalTestHelper.hpp
@@ -0,0 +1,198 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateLogicalBinaryTfLiteModel(tflite::BuiltinOperator logicalOperatorCode,
+ tflite::TensorType tensorType,
+ const std::vector <int32_t>& input0TensorShape,
+ const std::vector <int32_t>& input1TensorShape,
+ const std::vector <int32_t>& outputTensorShape,
+ float quantScale = 1.0f,
+ int quantOffset = 0)
+{
+ using namespace tflite;
+ flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+ std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+ buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+
+ auto quantizationParameters =
+ CreateQuantizationParameters(flatBufferBuilder,
+ 0,
+ 0,
+ flatBufferBuilder.CreateVector<float>({ quantScale }),
+ flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+
+ std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+ tensors[0] = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
+ input0TensorShape.size()),
+ tensorType,
+ 0,
+ flatBufferBuilder.CreateString("input_0"),
+ quantizationParameters);
+ tensors[1] = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
+ input1TensorShape.size()),
+ tensorType,
+ 0,
+ flatBufferBuilder.CreateString("input_1"),
+ quantizationParameters);
+ tensors[2] = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+ outputTensorShape.size()),
+ tensorType,
+ 0,
+ flatBufferBuilder.CreateString("output"),
+ quantizationParameters);
+
+ // create operator
+ tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
+ flatbuffers::Offset<void> operatorBuiltinOptions = 0;
+ switch (logicalOperatorCode)
+ {
+ case BuiltinOperator_LOGICAL_AND:
+ {
+ operatorBuiltinOptionsType = BuiltinOptions_LogicalAndOptions;
+ operatorBuiltinOptions = CreateLogicalAndOptions(flatBufferBuilder).Union();
+ break;
+ }
+ case BuiltinOperator_LOGICAL_OR:
+ {
+ operatorBuiltinOptionsType = BuiltinOptions_LogicalOrOptions;
+ operatorBuiltinOptions = CreateLogicalOrOptions(flatBufferBuilder).Union();
+ break;
+ }
+ default:
+ break;
+ }
+ const std::vector<int32_t> operatorInputs{ {0, 1} };
+ const std::vector<int32_t> operatorOutputs{ 2 };
+ flatbuffers::Offset <Operator> logicalBinaryOperator =
+ CreateOperator(flatBufferBuilder,
+ 0,
+ flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+ operatorBuiltinOptionsType,
+ operatorBuiltinOptions);
+
+ const std::vector<int> subgraphInputs{ {0, 1} };
+ const std::vector<int> subgraphOutputs{ 2 };
+ flatbuffers::Offset <SubGraph> subgraph =
+ CreateSubGraph(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+ flatBufferBuilder.CreateVector(&logicalBinaryOperator, 1));
+
+ flatbuffers::Offset <flatbuffers::String> modelDescription =
+ flatBufferBuilder.CreateString("ArmnnDelegate: Logical Binary Operator Model");
+ flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, logicalOperatorCode);
+
+ flatbuffers::Offset <Model> flatbufferModel =
+ CreateModel(flatBufferBuilder,
+ TFLITE_SCHEMA_VERSION,
+ flatBufferBuilder.CreateVector(&operatorCode, 1),
+ flatBufferBuilder.CreateVector(&subgraph, 1),
+ modelDescription,
+ flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+ flatBufferBuilder.Finish(flatbufferModel);
+
+ return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+ flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void LogicalBinaryTest(tflite::BuiltinOperator logicalOperatorCode,
+ tflite::TensorType tensorType,
+ std::vector<armnn::BackendId>& backends,
+ std::vector<int32_t>& input0Shape,
+ std::vector<int32_t>& input1Shape,
+ std::vector<int32_t>& expectedOutputShape,
+ std::vector<T>& input0Values,
+ std::vector<T>& input1Values,
+ std::vector<T>& expectedOutputValues,
+ float quantScale = 1.0f,
+ int quantOffset = 0)
+{
+ using namespace tflite;
+ std::vector<char> modelBuffer = CreateLogicalBinaryTfLiteModel(logicalOperatorCode,
+ tensorType,
+ input0Shape,
+ input1Shape,
+ expectedOutputShape,
+ quantScale,
+ quantOffset);
+
+ const Model* tfLiteModel = GetModel(modelBuffer.data());
+ // Create TfLite Interpreters
+ std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&armnnDelegateInterpreter) == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter != nullptr);
+ CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+ std::unique_ptr<Interpreter> tfLiteInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&tfLiteInterpreter) == kTfLiteOk);
+ CHECK(tfLiteInterpreter != nullptr);
+ CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+ // Create the ArmNN Delegate
+ armnnDelegate::DelegateOptions delegateOptions(backends);
+ std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+ theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+ armnnDelegate::TfLiteArmnnDelegateDelete);
+ CHECK(theArmnnDelegate != nullptr);
+ // Modify armnnDelegateInterpreter to use armnnDelegate
+ CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+ // Set input data for the armnn interpreter
+ armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values);
+ armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values);
+
+ // Set input data for the tflite interpreter
+ armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values);
+ armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values);
+
+ // Run EnqueueWorkload
+ CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+ // Compare output data. Boolean outputs are compared by calling CompareData directly,
+ // because std::vector<bool> stores its elements as a packed bit representation.
+ auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+ auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
+ auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+ auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
+
+ armnnDelegate::CompareData(expectedOutputValues, armnnDelegateOutputData, expectedOutputValues.size());
+ armnnDelegate::CompareData(expectedOutputValues, tfLiteDelegateOutputData, expectedOutputValues.size());
+ armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+
+ armnnDelegateInterpreter.reset(nullptr);
+ tfLiteInterpreter.reset(nullptr);
+}
+
+} // anonymous namespace
\ No newline at end of file