From 8b9858d891439fd1b0710e5d245e2116a3b88d30 Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Mon, 9 Nov 2020 08:26:22 +0000
Subject: IVGCVSW-5380 'TfLiteDelegate: Implement the Comparison operators'

* Implemented Comparison Operators
* Added unit tests

Signed-off-by: Sadik Armagan
Change-Id: Icdc0f7c6a286a8364a2770b26d15e8958291dc2b
---
 delegate/CMakeLists.txt                    |   2 +
 delegate/src/Comparison.hpp                | 107 +++-
 delegate/src/DelegateUtils.hpp             |   9 +-
 delegate/src/test/ComparisonTest.cpp       | 754 +++++++++++++++++++++++++++++
 delegate/src/test/ComparisonTestHelper.hpp | 236 +++++++++
 5 files changed, 1098 insertions(+), 10 deletions(-)
 create mode 100644 delegate/src/test/ComparisonTest.cpp
 create mode 100644 delegate/src/test/ComparisonTestHelper.hpp

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index acce8284a5..814976407b 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -89,6 +89,8 @@ target_include_directories(armnnDelegate
 set(armnnDelegate_unittest_sources)
 list(APPEND armnnDelegate_unittest_sources
         src/test/ArmnnDelegateTest.cpp
+        src/test/ComparisonTest.cpp
+        src/test/ComparisonTestHelper.hpp
         src/test/ElementwiseBinaryTest.cpp
         src/test/ElementwiseBinaryTestHelper.hpp
         src/test/ElementwiseUnaryTest.cpp
diff --git a/delegate/src/Comparison.hpp b/delegate/src/Comparison.hpp
index 19d8de10e1..f787a22090 100644
--- a/delegate/src/Comparison.hpp
+++ b/delegate/src/Comparison.hpp
@@ -5,6 +5,8 @@
 
 #pragma once
 
+#include "DelegateUtils.hpp"
+
 #include <tensorflow/lite/builtin_ops.h>
 #include <tensorflow/lite/c/builtin_op_data.h>
 #include <tensorflow/lite/c/common.h>
@@ -17,9 +19,110 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
                                      TfLiteContext* tfLiteContext,
                                      TfLiteNode* tfLiteNode,
                                      int nodeIndex,
-                                     int32_t comparisonOperatorCode)
+                                     int32_t tfLiteComparisonOperatorCode)
 {
-    return kTfLiteError;
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (IsDynamicTensor(tfLiteInputTensor0))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            tfLiteComparisonOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
+    if (IsDynamicTensor(tfLiteInputTensor1))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            tfLiteComparisonOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (IsDynamicTensor(tfLiteOutputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+            tfLiteComparisonOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
+    const armnn::TensorInfo& inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    armnn::ComparisonOperation comparisonOperation = armnn::ComparisonOperation::Equal;
+    switch(tfLiteComparisonOperatorCode)
+    {
+        case kTfLiteBuiltinEqual:
+            comparisonOperation = armnn::ComparisonOperation::Equal;
+            break;
+        case kTfLiteBuiltinGreater:
+            comparisonOperation = armnn::ComparisonOperation::Greater;
+            break;
+        case kTfLiteBuiltinGreaterEqual:
+            comparisonOperation = armnn::ComparisonOperation::GreaterOrEqual;
+            break;
+        case kTfLiteBuiltinLess:
+            comparisonOperation = armnn::ComparisonOperation::Less;
+            break;
+        case kTfLiteBuiltinLessEqual:
+            comparisonOperation = armnn::ComparisonOperation::LessOrEqual;
+            break;
+        case kTfLiteBuiltinNotEqual:
+            comparisonOperation = armnn::ComparisonOperation::NotEqual;
+            break;
+        default:
+            return kTfLiteError;
+    }
+
+    armnn::ComparisonDescriptor descriptor(comparisonOperation);
+    bool isSupported = false;
+
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsComparisonSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputTensorInfo0,
+                                   inputTensorInfo1,
+                                   outputTensorInfo,
+                                   descriptor);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* comparisonLayer = delegateData.m_Network->AddComparisonLayer(descriptor);
+    ARMNN_ASSERT(comparisonLayer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = comparisonLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
+                                        inputTensorInfo1,
+                                        comparisonLayer,
+                                        tfLiteContext,
+                                        tfLiteNode,
+                                        delegateData);
+    if (!reshapeLayer)
+    {
+        return kTfLiteError;
+    }
+    return kTfLiteOk;
 }
 
 } // namespace armnnDelegate
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index fca6a6c9ed..00279f630d 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -139,14 +139,7 @@ armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
     if (inputDimensions0 == inputDimensions1)
     {
         auto status = Connect(startLayer, tfLiteNode, delegateData);
-        if(status == kTfLiteOk)
-        {
-            return startLayer;
-        }
-        else
-        {
-            return nullptr;
-        }
+        return status == kTfLiteOk ? startLayer : nullptr;
     }
 
     unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
diff --git a/delegate/src/test/ComparisonTest.cpp b/delegate/src/test/ComparisonTest.cpp
new file mode 100644
index 0000000000..0826535c8f
--- /dev/null
+++ b/delegate/src/test/ComparisonTest.cpp
@@ -0,0 +1,754 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT +// + +#include "ComparisonTestHelper.hpp" + +#include + +#include +#include +#include +#include +#include +#include + +#include + +namespace armnnDelegate +{ + +void EqualFP32Test(std::vector& backends) +{ + std::vector input0Shape { 2, 2, 2, 2 }; + std::vector input1Shape { 2, 2, 2, 2 }; + std::vector expectedOutputShape { 2, 2, 2, 2 }; + + std::vector input0Values = + { + 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f, + 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f + }; + + std::vector input1Values = + { + 1.f, 1.f, 1.f, 1.f, 3.f, 3.f, 3.f, 3.f, + 5.f, 5.f, 5.f, 5.f, 4.f, 4.f, 4.f, 4.f + }; + + std::vector expectedOutputValues = + { + 1, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 1 + }; + + + ComparisonTest(tflite::BuiltinOperator_EQUAL, + ::tflite::TensorType_FLOAT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void EqualBroadcastTest(std::vector& backends) +{ + std::vector input0Shape { 1, 2, 2, 3 }; + std::vector input1Shape { 1, 1, 1, 3 }; + std::vector expectedOutputShape { 1, 2, 2, 3 }; + + std::vector input0Values + { + 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, + 7.f, 8.f, 9.f, 10.f, 11.f, 12.f + }; + std::vector input1Values { 4.f, 5.f, 6.f }; + // Set output data + std::vector expectedOutputValues + { + 0, 0, 0, 1, 1, 1, + 0, 0, 0, 0, 0, 0 + }; + ComparisonTest(tflite::BuiltinOperator_EQUAL, + ::tflite::TensorType_FLOAT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void EqualInt32Test(std::vector& backends) +{ + std::vector input0Shape { 1, 2, 2, 1 }; + std::vector input1Shape { 1, 2, 2, 1 }; + std::vector expectedOutputShape { 1, 2, 2, 1 }; + + std::vector input0Values = { 1, 5, 6, 4 }; + + std::vector input1Values = { 1, 3, 9, 4 }; + + std::vector expectedOutputValues = { 1, 0, 0, 1 }; + + ComparisonTest(tflite::BuiltinOperator_EQUAL, + ::tflite::TensorType_INT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void NotEqualFP32Test(std::vector& backends) +{ + std::vector input0Shape { 2, 2, 2, 2 }; + std::vector input1Shape { 2, 2, 2, 2 }; + std::vector expectedOutputShape { 2, 2, 2, 2 }; + + std::vector input0Values = + { + 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f, + 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f + }; + + std::vector input1Values = + { + 1.f, 1.f, 1.f, 1.f, 3.f, 3.f, 3.f, 3.f, + 5.f, 5.f, 5.f, 5.f, 4.f, 4.f, 4.f, 4.f + }; + + std::vector expectedOutputValues = + { + 0, 0, 0, 0, 1, 1, 1, 1, + 1, 1, 1, 1, 0, 0, 0, 0 + }; + + ComparisonTest(tflite::BuiltinOperator_NOT_EQUAL, + ::tflite::TensorType_FLOAT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void NotEqualBroadcastTest(std::vector& backends) +{ + std::vector input0Shape { 1, 2, 2, 3 }; + std::vector input1Shape { 1, 1, 1, 3 }; + std::vector expectedOutputShape { 1, 2, 2, 3 }; + + std::vector input0Values + { + 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, + 7.f, 8.f, 9.f, 10.f, 11.f, 12.f + }; + std::vector input1Values { 4.f, 5.f, 6.f }; + // Set output data + std::vector expectedOutputValues + { + 1, 1, 1, 0, 0, 0, + 1, 1, 1, 1, 1, 1 + }; + ComparisonTest(tflite::BuiltinOperator_NOT_EQUAL, + ::tflite::TensorType_FLOAT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void NotEqualInt32Test(std::vector& backends) 
+{ + std::vector input0Shape { 1, 2, 2, 1 }; + std::vector input1Shape { 1, 2, 2, 1 }; + std::vector expectedOutputShape { 1, 2, 2, 1 }; + + std::vector input0Values = { 1, 5, 6, 4 }; + + std::vector input1Values = { 1, 3, 9, 4 }; + + std::vector expectedOutputValues = { 0, 1, 1, 0 }; + + ComparisonTest(tflite::BuiltinOperator_NOT_EQUAL, + ::tflite::TensorType_INT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void GreaterFP32Test(std::vector& backends) +{ + std::vector input0Shape { 1, 2, 2, 1 }; + std::vector input1Shape { 1, 2, 2, 1 }; + std::vector expectedOutputShape { 1, 2, 2, 1 }; + + std::vector input0Values = { 1, 5, 6, 4 }; + + std::vector input1Values = { 1, 3, 9, 4 }; + + std::vector expectedOutputValues = { 0, 1, 0, 0 }; + + ComparisonTest(tflite::BuiltinOperator_GREATER, + ::tflite::TensorType_FLOAT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void GreaterBroadcastTest(std::vector& backends) +{ + std::vector input0Shape { 1, 2, 2, 3 }; + std::vector input1Shape { 1, 1, 1, 3 }; + std::vector expectedOutputShape { 1, 2, 2, 3 }; + + std::vector input0Values + { + 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, + 7.f, 8.f, 9.f, 10.f, 11.f, 12.f + }; + std::vector input1Values { 4.f, 5.f, 6.f }; + + std::vector expectedOutputValues + { + 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1 + }; + ComparisonTest(tflite::BuiltinOperator_GREATER, + ::tflite::TensorType_FLOAT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void GreaterInt32Test(std::vector& backends) +{ + std::vector input0Shape { 1, 2, 2, 1 }; + std::vector input1Shape { 1, 2, 2, 1 }; + std::vector expectedOutputShape { 1, 2, 2, 1 }; + + std::vector input0Values = { 1, 5, 6, 4 }; + + std::vector input1Values = { 1, 3, 9, 4 }; + + std::vector expectedOutputValues = { 0, 1, 0, 0 }; + + ComparisonTest(tflite::BuiltinOperator_GREATER, + ::tflite::TensorType_INT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void GreaterEqualFP32Test(std::vector& backends) +{ + std::vector input0Shape { 1, 2, 2, 1 }; + std::vector input1Shape { 1, 2, 2, 1 }; + std::vector expectedOutputShape { 1, 2, 2, 1 }; + + std::vector input0Values = { 1.f, 5.f, 6.f, 4.f }; + + std::vector input1Values = { 1.f, 3.f, 9.f, 4.f }; + + std::vector expectedOutputValues = { true, true, false, true }; + + ComparisonTest(tflite::BuiltinOperator_GREATER_EQUAL, + ::tflite::TensorType_FLOAT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void GreaterEqualBroadcastTest(std::vector& backends) +{ + std::vector input0Shape { 1, 2, 2, 3 }; + std::vector input1Shape { 1, 1, 1, 3 }; + std::vector expectedOutputShape { 1, 2, 2, 3 }; + + std::vector input0Values + { + 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, + 7.f, 8.f, 9.f, 10.f, 11.f, 12.f + }; + std::vector input1Values { 4.f, 5.f, 6.f }; + // Set output data + std::vector expectedOutputValues + { + 0, 0, 0, 1, 1, 1, + 1, 1, 1, 1, 1, 1 + }; + + ComparisonTest(tflite::BuiltinOperator_GREATER_EQUAL, + ::tflite::TensorType_FLOAT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void GreaterEqualInt32Test(std::vector& backends) +{ + std::vector input0Shape { 1, 2, 2, 1 }; + 
std::vector input1Shape { 1, 2, 2, 1 }; + std::vector expectedOutputShape { 1, 2, 2, 1 }; + + std::vector input0Values = { 1, 5, 6, 3 }; + + std::vector input1Values = { 1, 3, 9, 4 }; + + std::vector expectedOutputValues = { 1, 1, 0, 0 }; + + ComparisonTest(tflite::BuiltinOperator_GREATER_EQUAL, + ::tflite::TensorType_INT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void LessFP32Test(std::vector& backends) +{ + std::vector input0Shape { 1, 2, 2, 1 }; + std::vector input1Shape { 1, 2, 2, 1 }; + std::vector expectedOutputShape { 1, 2, 2, 1 }; + + std::vector input0Values = { 1.f, 5.f, 6.f, 4.f }; + + std::vector input1Values = { 1.f, 3.f, 9.f, 4.f }; + + std::vector expectedOutputValues = { false, false, true, false }; + + ComparisonTest(tflite::BuiltinOperator_LESS, + ::tflite::TensorType_FLOAT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void LessBroadcastTest(std::vector& backends) +{ + std::vector input0Shape { 1, 2, 2, 3 }; + std::vector input1Shape { 1, 1, 1, 3 }; + std::vector expectedOutputShape { 1, 2, 2, 3 }; + + std::vector input0Values + { + 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, + 7.f, 8.f, 9.f, 10.f, 11.f, 12.f + }; + std::vector input1Values { 4.f, 5.f, 6.f }; + + std::vector expectedOutputValues + { + true, true, true, false, false, false, + false, false, false, false, false, false + }; + + ComparisonTest(tflite::BuiltinOperator_LESS, + ::tflite::TensorType_FLOAT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void LessInt32Test(std::vector& backends) +{ + std::vector input0Shape { 1, 2, 2, 1 }; + std::vector input1Shape { 1, 2, 2, 1 }; + std::vector expectedOutputShape { 1, 2, 2, 1 }; + + std::vector input0Values = { 1, 5, 6, 3 }; + + std::vector input1Values = { 1, 3, 9, 4 }; + + std::vector expectedOutputValues = { false, false, true, true }; + + ComparisonTest(tflite::BuiltinOperator_LESS, + ::tflite::TensorType_INT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void LessEqualFP32Test(std::vector& backends) +{ + std::vector input0Shape { 1, 2, 2, 1 }; + std::vector input1Shape { 1, 2, 2, 1 }; + std::vector expectedOutputShape { 1, 2, 2, 1 }; + + std::vector input0Values = { 1.f, 5.f, 6.f, 4.f }; + + std::vector input1Values = { 1.f, 3.f, 9.f, 4.f }; + + std::vector expectedOutputValues = { true, false, true, true }; + + ComparisonTest(tflite::BuiltinOperator_LESS_EQUAL, + ::tflite::TensorType_FLOAT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void LessEqualBroadcastTest(std::vector& backends) +{ + std::vector input0Shape { 1, 2, 2, 3 }; + std::vector input1Shape { 1, 1, 1, 3 }; + std::vector expectedOutputShape { 1, 2, 2, 3 }; + + std::vector input0Values + { + 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, + 7.f, 8.f, 9.f, 10.f, 11.f, 12.f + }; + std::vector input1Values { 4.f, 5.f, 6.f }; + + std::vector expectedOutputValues + { + true, true, true, true, true, true, + false, false, false, false, false, false + }; + + ComparisonTest(tflite::BuiltinOperator_LESS_EQUAL, + ::tflite::TensorType_FLOAT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +void LessEqualInt32Test(std::vector& backends) +{ + 
std::vector input0Shape { 1, 2, 2, 1 }; + std::vector input1Shape { 1, 2, 2, 1 }; + std::vector expectedOutputShape { 1, 2, 2, 1 }; + + std::vector input0Values = { 1, 5, 6, 3 }; + + std::vector input1Values = { 1, 3, 9, 4 }; + + std::vector expectedOutputValues = { true, false, true, true }; + + ComparisonTest(tflite::BuiltinOperator_LESS_EQUAL, + ::tflite::TensorType_INT32, + backends, + input0Shape, + input1Shape, + expectedOutputShape, + input0Values, + input1Values, + expectedOutputValues); +} + +TEST_SUITE("ComparisonTest") +{ + +TEST_CASE ("EQUAL_FP32_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + EqualFP32Test(backends); +} + +TEST_CASE ("EQUAL_FP32_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + EqualFP32Test(backends); +} + +TEST_CASE ("EQUAL_Broadcast_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + EqualBroadcastTest(backends); +} + +TEST_CASE ("EQUAL_Broadcast_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + EqualBroadcastTest(backends); +} + +TEST_CASE ("EQUAL_INT32_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + EqualInt32Test(backends); +} + +TEST_CASE ("EQUAL_INT32_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + EqualInt32Test(backends); +} + +TEST_CASE ("NOT_EQUAL_FP32_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + NotEqualFP32Test(backends); +} + +TEST_CASE ("NOT_EQUAL_FP32_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + NotEqualFP32Test(backends); +} + +TEST_CASE ("NOT_EQUAL_Broadcast_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + NotEqualBroadcastTest(backends); +} + +TEST_CASE ("NOT_EQUAL_Broadcast_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + NotEqualBroadcastTest(backends); +} + +TEST_CASE ("NOT_EQUAL_INT32_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + NotEqualInt32Test(backends); +} + +TEST_CASE ("NOT_EQUAL_INT32_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + NotEqualInt32Test(backends); +} + +TEST_CASE ("GREATER_FP32_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + GreaterFP32Test(backends); +} + +TEST_CASE ("GREATER_FP32_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + GreaterFP32Test(backends); +} + +TEST_CASE ("GREATER_Broadcast_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + GreaterBroadcastTest(backends); +} + +TEST_CASE ("GREATER_Broadcast_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + GreaterBroadcastTest(backends); +} + +TEST_CASE ("GREATER_INT32_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + GreaterInt32Test(backends); +} + +TEST_CASE ("GREATER_INT32_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + GreaterInt32Test(backends); +} +TEST_CASE ("GREATER_EQUAL_FP32_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + 
GreaterEqualFP32Test(backends); +} + +TEST_CASE ("GREATER_EQUAL_FP32_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + GreaterEqualFP32Test(backends); +} + +TEST_CASE ("GREATER_EQUAL_Broadcast_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + GreaterEqualBroadcastTest(backends); +} + +TEST_CASE ("GREATER_EQUAL_Broadcast_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + GreaterEqualBroadcastTest(backends); +} + +TEST_CASE ("GREATER_EQUAL_INT32_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + GreaterEqualInt32Test(backends); +} + +TEST_CASE ("GREATER_EQUAL_INT32_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + GreaterEqualInt32Test(backends); +} +TEST_CASE ("LESS_FP32_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + LessFP32Test(backends); +} + +TEST_CASE ("LESS_FP32_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + LessFP32Test(backends); +} + +TEST_CASE ("LESS_Broadcast_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + LessBroadcastTest(backends); +} + +TEST_CASE ("LESS_Broadcast_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + LessBroadcastTest(backends); +} + +TEST_CASE ("LESS_INT32_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + LessInt32Test(backends); +} + +TEST_CASE ("LESS_INT32_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + LessInt32Test(backends); +} +TEST_CASE ("LESS_EQUAL_FP32_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + LessEqualFP32Test(backends); +} + +TEST_CASE ("LESS_EQUAL_FP32_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + LessEqualFP32Test(backends); +} + +TEST_CASE ("LESS_EQUAL_Broadcast_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + LessEqualBroadcastTest(backends); +} + +TEST_CASE ("LESS_EQUAL_Broadcast_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + LessEqualBroadcastTest(backends); +} + +TEST_CASE ("LESS_EQUAL_INT32_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + LessEqualInt32Test(backends); +} + +TEST_CASE ("LESS_EQUAL_INT32_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + LessEqualInt32Test(backends); +} + +} // End TEST_SUITE("ComparisonTest") + +} // namespace armnnDelegate \ No newline at end of file diff --git a/delegate/src/test/ComparisonTestHelper.hpp b/delegate/src/test/ComparisonTestHelper.hpp new file mode 100644 index 0000000000..0011c763a0 --- /dev/null +++ b/delegate/src/test/ComparisonTestHelper.hpp @@ -0,0 +1,236 @@ +// +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateComparisonTfLiteModel(tflite::BuiltinOperator comparisonOperatorCode,
+                                              tflite::TensorType tensorType,
+                                              const std::vector <int32_t>& input0TensorShape,
+                                              const std::vector <int32_t>& input1TensorShape,
+                                              const std::vector <int32_t>& outputTensorShape,
+                                              float quantScale = 1.0f,
+                                              int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector<uint8_t>({})));
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
+                                                                      input0TensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input_0"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
+                                                                      input1TensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input_1"),
+                              quantizationParameters);
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              ::tflite::TensorType_BOOL,
+                              0);
+
+    // create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_EqualOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateEqualOptions(flatBufferBuilder).Union();
+    switch (comparisonOperatorCode)
+    {
+        case BuiltinOperator_EQUAL:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_EqualOptions;
+            operatorBuiltinOptions = CreateEqualOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_NOT_EQUAL:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_NotEqualOptions;
+            operatorBuiltinOptions = CreateNotEqualOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_GREATER:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_GreaterOptions;
+            operatorBuiltinOptions = CreateGreaterOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_GREATER_EQUAL:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_GreaterEqualOptions;
+            operatorBuiltinOptions = CreateGreaterEqualOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_LESS:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_LessOptions;
+            operatorBuiltinOptions = CreateLessOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_LESS_EQUAL:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_LessEqualOptions;
+            operatorBuiltinOptions = CreateLessEqualOptions(flatBufferBuilder).Union();
+            break;
+        }
+        default:
+            break;
+    }
+    const std::vector<int32_t> operatorInputs{ {0, 1} };
+    const std::vector<int32_t> operatorOutputs{{2}};
+    flatbuffers::Offset<Operator> comparisonOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int32_t> subgraphInputs{ {0, 1} };
+    const std::vector<int32_t> subgraphOutputs{{2}};
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&comparisonOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Comparison Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, comparisonOperatorCode);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void ComparisonTest(tflite::BuiltinOperator comparisonOperatorCode,
+                    tflite::TensorType tensorType,
+                    std::vector<armnn::BackendId>& backends,
+                    std::vector<int32_t>& input0Shape,
+                    std::vector<int32_t>& input1Shape,
+                    std::vector<int32_t>& outputShape,
+                    std::vector<T>& input0Values,
+                    std::vector<T>& input1Values,
+                    std::vector<bool>& expectedOutputValues,
+                    float quantScale = 1.0f,
+                    int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateComparisonTfLiteModel(comparisonOperatorCode,
+                                                                tensorType,
+                                                                input0Shape,
+                                                                input1Shape,
+                                                                outputShape,
+                                                                quantScale,
+                                                                quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInput0Id = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateInput0Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput0Id);
+    for (unsigned int i = 0; i < input0Values.size(); ++i)
+    {
+        tfLiteDelegateInput0Data[i] = input0Values[i];
+    }
+
+    auto tfLiteDelegateInput1Id = tfLiteInterpreter->inputs()[1];
+    auto tfLiteDelegateInput1Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput1Id);
+    for (unsigned int i = 0; i < input1Values.size(); ++i)
+    {
+        tfLiteDelegateInput1Data[i] = input1Values[i];
+    }
+
+    auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput0Id);
+    for (unsigned int i = 0; i < input0Values.size(); ++i)
+    {
+        armnnDelegateInput0Data[i] = input0Values[i];
+    }
+
+    auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
+    auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput1Id);
+    for (unsigned int i = 0; i < input1Values.size(); ++i)
+    {
+        armnnDelegateInput1Data[i] = input1Values[i];
+    }
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<bool>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<bool>(armnnDelegateOutputId);
+
+    for (size_t i = 0; i < expectedOutputValues.size(); i++)
+    {
+        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
+        CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
+        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
+    }
+}
+
+} // anonymous namespace
\ No newline at end of file
-- 
cgit v1.2.1
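
Usage sketch (not part of the patch above): the FP32 and INT32 tests in ComparisonTest.cpp all leave the helper's quantScale/quantOffset parameters at their defaults, so the sketch below shows how an additional quantised (UINT8) case could be written against the ComparisonTestHelper introduced here. The test name and backend choice are illustrative, and UINT8 support for LESS_EQUAL in both the TfLite reference kernel and the delegate is assumed rather than verified by this patch; the helper signature is the one reconstructed above.

// Illustrative only - a hypothetical extra doctest case reusing ComparisonTestHelper.hpp.
#include "ComparisonTestHelper.hpp"

#include <doctest/doctest.h>

namespace armnnDelegate
{

TEST_CASE ("LESS_EQUAL_UINT8_CpuRef_Sketch")
{
    // Single reference backend; other backends would follow the same pattern.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };

    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };

    // Raw quantised values; with scale 1.0 and offset 0 they compare like plain integers.
    std::vector<uint8_t> input0Values = { 1, 5, 6, 3 };
    std::vector<uint8_t> input1Values = { 1, 3, 9, 4 };
    std::vector<bool> expectedOutputValues = { true, false, true, true };

    ComparisonTest<uint8_t>(tflite::BuiltinOperator_LESS_EQUAL,
                            ::tflite::TensorType_UINT8,
                            backends,
                            input0Shape,
                            input1Shape,
                            expectedOutputShape,
                            input0Values,
                            input1Values,
                            expectedOutputValues,
                            1.0f, // quantScale applied to both input tensors
                            0);   // quantOffset
}

} // namespace armnnDelegate

Because the helper builds the output tensor as TensorType_BOOL regardless of the input type, only the input value type and the tensorType argument need to change for a quantised case; everything else (model construction, delegate registration, output comparison) is shared.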