From 21a94ff6212aac28398f90373e873d43390070a3 Mon Sep 17 00:00:00 2001
From: Sadik Armagan <sadik.armagan@arm.com>
Date: Mon, 9 Nov 2020 08:38:30 +0000
Subject: IVGCVSW-5379 'TfLiteDelegate: Implement the ElementWiseBinary
 operators'

* Enabled elementwise binary operators DIV, MAXIMUM, MINIMUM, MUL, SUB
* Implemented unit tests for elementwise binary operators

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I196998d53201a6e8888bb203eb640530b8feeac9
---
 delegate/cmake/Modules/FindTfLite.cmake           |   2 +-
 delegate/src/ElementwiseBinary.hpp                | 161 +++-
 delegate/src/test/ElementwiseBinaryTest.cpp       | 878 ++++++++++++++++++++--
 delegate/src/test/ElementwiseBinaryTestHelper.hpp |  89 ++-
 4 files changed, 1044 insertions(+), 86 deletions(-)

diff --git a/delegate/cmake/Modules/FindTfLite.cmake b/delegate/cmake/Modules/FindTfLite.cmake
index d6c274cb65..96e15db108 100644
--- a/delegate/cmake/Modules/FindTfLite.cmake
+++ b/delegate/cmake/Modules/FindTfLite.cmake
@@ -11,7 +11,7 @@ find_path(TfLite_INCLUDE_DIR
         tensorflow/lite
         third_party
     HINTS
-        ${TFLITE_LIB_ROOT})
+        ${TENSORFLOW_ROOT}/..)
 
 find_library(TfLite_LIB
     NAMES
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index a22d9f5751..3d3f1a0799 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -38,15 +38,119 @@ TfLiteStatus ValidateAddOperator(DelegateData& delegateData,
     return isSupported ? kTfLiteOk : kTfLiteError;
 }
 
-armnn::IConnectableLayer* AddAdditionLayer(DelegateData& delegateData)
+TfLiteStatus ValidateDivOperator(DelegateData& delegateData,
+                                 TfLiteContext* tfLiteContext,
+                                 const armnn::TensorInfo& inputInfo1,
+                                 const armnn::TensorInfo& inputInfo2,
+                                 const armnn::TensorInfo& outputInfo)
 {
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsDivisionSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputInfo1,
+                                   inputInfo2,
+                                   outputTensorInfo);
+    };
 
-    if (!delegateData.m_Network)
+    validateFunc(outputInfo, isSupported);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateMaximumOperator(DelegateData& delegateData,
+                                     TfLiteContext* tfLiteContext,
+                                     const armnn::TensorInfo& inputInfo1,
+                                     const armnn::TensorInfo& inputInfo2,
+                                     const armnn::TensorInfo& outputInfo)
+{
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
-        return nullptr;
-    }
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsMaximumSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputInfo1,
+                                   inputInfo2,
+                                   outputTensorInfo);
+    };
+
+    validateFunc(outputInfo, isSupported);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateMinimumOperator(DelegateData& delegateData,
+                                     TfLiteContext* tfLiteContext,
+                                     const armnn::TensorInfo& inputInfo1,
+                                     const armnn::TensorInfo& inputInfo2,
+                                     const armnn::TensorInfo& outputInfo)
+{
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsMinimumSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputInfo1,
+                                   inputInfo2,
+                                   outputTensorInfo);
+    };
+
+    validateFunc(outputInfo, isSupported);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateMulOperator(DelegateData& delegateData,
+                                 TfLiteContext* tfLiteContext,
+                                 const armnn::TensorInfo& inputInfo1,
+                                 const armnn::TensorInfo& inputInfo2,
+                                 const armnn::TensorInfo& outputInfo)
+{
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsMultiplicationSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputInfo1,
+                                   inputInfo2,
+                                   outputTensorInfo);
+    };
 
-    return delegateData.m_Network->AddAdditionLayer();
+    validateFunc(outputInfo, isSupported);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus ValidateSubOperator(DelegateData& delegateData,
+                                 TfLiteContext* tfLiteContext,
+                                 const armnn::TensorInfo& inputInfo1,
+                                 const armnn::TensorInfo& inputInfo2,
+                                 const armnn::TensorInfo& outputInfo)
+{
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsSubtractionSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputInfo1,
+                                   inputInfo2,
+                                   outputTensorInfo);
+    };
+
+    validateFunc(outputInfo, isSupported);
+    return isSupported ? kTfLiteOk : kTfLiteError;
 }
 
 TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
@@ -103,6 +207,36 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
                                            inputTensorInfo0,
                                            inputTensorInfo1,
                                            outputTensorInfo);
+            case kTfLiteBuiltinDiv:
+                return ValidateDivOperator(delegateData,
+                                           tfLiteContext,
+                                           inputTensorInfo0,
+                                           inputTensorInfo1,
+                                           outputTensorInfo);
+            case kTfLiteBuiltinMaximum:
+                return ValidateMaximumOperator(delegateData,
+                                               tfLiteContext,
+                                               inputTensorInfo0,
+                                               inputTensorInfo1,
+                                               outputTensorInfo);
+            case kTfLiteBuiltinMinimum:
+                return ValidateMinimumOperator(delegateData,
+                                               tfLiteContext,
+                                               inputTensorInfo0,
+                                               inputTensorInfo1,
+                                               outputTensorInfo);
+            case kTfLiteBuiltinMul:
+                return ValidateMulOperator(delegateData,
+                                           tfLiteContext,
+                                           inputTensorInfo0,
+                                           inputTensorInfo1,
+                                           outputTensorInfo);
+            case kTfLiteBuiltinSub:
+                return ValidateSubOperator(delegateData,
+                                           tfLiteContext,
+                                           inputTensorInfo0,
+                                           inputTensorInfo1,
+                                           outputTensorInfo);
             default:
                 return kTfLiteError;
         }
@@ -113,7 +247,22 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
     switch(elementwiseBinaryOperatorCode)
     {
         case kTfLiteBuiltinAdd:
-            elementwiseBinaryLayer = AddAdditionLayer(delegateData);
+            elementwiseBinaryLayer = delegateData.m_Network->AddAdditionLayer();
+            break;
+        case kTfLiteBuiltinDiv:
+            elementwiseBinaryLayer = delegateData.m_Network->AddDivisionLayer();
+            break;
+        case kTfLiteBuiltinMaximum:
+            elementwiseBinaryLayer = delegateData.m_Network->AddMaximumLayer();
+            break;
+        case kTfLiteBuiltinMinimum:
+            elementwiseBinaryLayer = delegateData.m_Network->AddMinimumLayer();
+            break;
+        case kTfLiteBuiltinMul:
+            elementwiseBinaryLayer = delegateData.m_Network->AddMultiplicationLayer();
+            break;
+        case kTfLiteBuiltinSub:
+            elementwiseBinaryLayer = delegateData.m_Network->AddSubtractionLayer();
             break;
         default:
             return kTfLiteError;
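
Note: the delegate calls VisitElementwiseBinaryOperator twice per operator. On the first pass delegateData.m_Network is still null, so the switch above only asks the configured backends whether the operation is supported (via the Validate*Operator helpers); on the second pass it adds the matching ArmNN layer. A minimal, self-contained sketch of that two-phase shape follows — the types here are simplified stand-ins, not the delegate's real DelegateData/INetwork classes:

    #include <iostream>

    // Simplified stand-ins for the delegate's types; illustrative only.
    struct Network
    {
        void AddMultiplicationLayer() { std::cout << "Multiplication layer added\n"; }
    };

    struct DelegateData
    {
        Network* m_Network = nullptr;   // null during the validation pass
    };

    enum Status { Ok, Error };

    // Two-phase visit: pass 1 (m_Network == nullptr) only answers
    // "is this op supported?", pass 2 actually builds the graph.
    Status VisitMul(DelegateData& delegateData, bool backendSupportsMul)
    {
        if (delegateData.m_Network == nullptr)
        {
            return backendSupportsMul ? Ok : Error;       // validation pass
        }
        delegateData.m_Network->AddMultiplicationLayer(); // construction pass
        return Ok;
    }

    int main()
    {
        DelegateData validationPass;                 // no network attached yet
        if (VisitMul(validationPass, true) == Ok)
        {
            Network network;
            DelegateData constructionPass{ &network };
            VisitMul(constructionPass, true);        // prints the added layer
        }
    }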
diff --git a/delegate/src/test/ElementwiseBinaryTest.cpp b/delegate/src/test/ElementwiseBinaryTest.cpp
index bd4019a686..2a8c91b2ef 100644
--- a/delegate/src/test/ElementwiseBinaryTest.cpp
+++ b/delegate/src/test/ElementwiseBinaryTest.cpp
@@ -19,18 +19,11 @@
 namespace armnnDelegate
 {
 
-TEST_SUITE("ElementwiseBinaryTest")
-{
-
-TEST_CASE ("Add_Float32_GpuAcc_Test")
+void AddFP32Test(std::vector<armnn::BackendId>& backends)
 {
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-
     // Set input data
     std::vector<int32_t> input0Shape { 2, 2, 2, 3 };
     std::vector<int32_t> input1Shape { 2, 2, 2, 3 };
-    std::vector<int32_t> outputShape { 2, 2, 2, 3 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 3 };
 
     std::vector<float> input0Values =
     {
@@ -45,7 +38,6 @@ TEST_CASE ("Add_Float32_GpuAcc_Test")
 
         0.0f, 0.0f, 1.0f,
         0.2f, 1.0f, 2.0f,
-
     };
 
     std::vector<float> input1Values =
@@ -78,27 +70,23 @@ TEST_CASE ("Add_Float32_GpuAcc_Test")
 
         0.9f, 2.0f, 7.0f,
     };
-
-    ElementwiseBinaryFP32Test(tflite::BuiltinOperator_ADD,
-                              tflite::ActivationFunctionType_NONE,
-                              backends,
-                              input0Shape,
-                              input1Shape,
-                              outputShape,
-                              input0Values,
-                              input1Values,
-                              expectedOutputValues);
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
 }
 
-TEST_CASE ("Add_Broadcast_Float32_GpuAcc_Test")
+void AddBroadcastTest(std::vector<armnn::BackendId>& backends)
 {
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-
     // Set input data
     std::vector<int32_t> input0Shape { 1, 3, 2, 1 };
     std::vector<int32_t> input1Shape { 1, 1, 2, 3 };
-    std::vector<int32_t> outputShape { 1, 3, 2, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 3, 2, 3 };
 
     std::vector<float> input0Values
     {
@@ -128,42 +116,830 @@ TEST_CASE ("Add_Broadcast_Float32_GpuAcc_Test")
         4.5f, 5.5f, 6.5f,
         8.5f, 9.5f, 10.5f,
     };
-    ElementwiseBinaryFP32Test(tflite::BuiltinOperator_ADD,
-                              tflite::ActivationFunctionType_NONE,
-                              backends,
-                              input0Shape,
-                              input1Shape,
-                              outputShape,
-                              input0Values,
-                              input1Values,
-                              expectedOutputValues);
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
 }
 
-TEST_CASE ("Add_ActivationRELU_Float32_GpuAcc_Test")
+void AddActivationTest(std::vector<armnn::BackendId>& backends)
 {
-    // Create the ArmNN Delegate
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-
     // Set input data
     std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
     std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
-    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
 
     std::vector<float> input0Values { 4.0f, 0.8f, 0.7f, -0.8f };
     std::vector<float> input1Values { 0.7f, -1.2f, 0.8f, 0.5f };
-    // Set output data
     std::vector<float> expectedOutputValues { 4.7f, 0.0f, 1.5f, 0.0f };
-    ElementwiseBinaryFP32Test(tflite::BuiltinOperator_ADD,
-                              tflite::ActivationFunctionType_RELU,
-                              backends,
-                              input0Shape,
-                              input1Shape,
-                              outputShape,
-                              input0Values,
-                              input1Values,
-                              expectedOutputValues);
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
+                                 tflite::ActivationFunctionType_RELU,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void AddUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<uint8_t> input0Values =
+    {
+        63, 35, 77, 70, 56, 112,
+        203, 28, 252, 168, 245, 91
+    };
+
+    std::vector<uint8_t> input1Values =
+    {
+        21, 7, 175, 231, 175, 210,
+        126, 161, 63, 21, 105, 126
+    };
+
+    std::vector<uint8_t> expectedOutputValues =
+    {
+        81, 39, 249, 255, 228, 255,
+        255, 186, 255, 186, 255, 214,
+    };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_ADD,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 7.0f, 3);
+}
+
+void DivFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        2.f, 2.f, 2.f, 2.f, 3.f, 3.f, 3.f, 3.f,
+        4.f, 4.f, 4.f, 4.f, 5.f, 5.f, 5.f, 5.f
+
+    };
+
+    std::vector<float> input1Values =
+    {
+        1.f, 1.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f,
+        4.f, 4.f, 4.f, 4.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        2.f, 2.f, 2.f, 2.f, 1.50f, 1.50f, 1.50f, 1.50f,
+        1.f, 1.f, 1.f, 1.f, 1.25f, 1.25f, 1.25f, 1.25f
+    };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_DIV,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void DivBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 2 };
+
+    std::vector<float> input0Values = { 2, 4, 6, 8, 10, 12, 14, 16 };
+    std::vector<float> input1Values = { 2 };
+    std::vector<float> expectedOutputValues = { 1, 2, 3, 4, 5, 6, 7, 8 };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_DIV,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void DivUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<uint8_t> input0Values =
+    {
+        2, 2, 2, 2, 3, 3, 3, 3,
+        4, 4, 4, 4, 5, 5, 5, 5
+
+    };
+
+    std::vector<uint8_t> input1Values =
+    {
+        1, 1, 1, 1, 2, 2, 2, 2,
+        4, 4, 4, 4, 4, 4, 4, 4
+    };
+
+    std::vector<uint8_t> expectedOutputValues =
+    {
+        8, 8, 8, 8, 6, 6, 6, 6,
+        4, 4, 4, 4, 5, 5, 5, 5
+    };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_DIV,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 0.25f, 0);
+}
+
+void MaxFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        1.f, 1.f, 5.f, 1.f, 2.f, 2.f, 7.f, 2.f,
+        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
+
+    };
+
+    std::vector<float> input1Values =
+    {
+        2.f, 2.f, 2.f, 2.f, 3.f, 3.f, 3.f, 3.f,
+        4.f, 4.f, 4.f, 4.f, 5.f, 5.f, 5.f, 5.f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        2.f, 2.f, 5.f, 2.f, 3.f, 3.f, 7.f, 3.f,
+        4.f, 4.f, 4.f, 4.f, 5.f, 5.f, 5.f, 5.f
+    };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MAXIMUM,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MaxBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 2 };
+
+    std::vector<float> input0Values = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
+    std::vector<float> input1Values = { 4.f };
+    std::vector<float> expectedOutputValues = { 4.f, 4.f, 4.f, 4.f, 5.f, 6.f, 7.f, 8.f };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MAXIMUM,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MaxUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<uint8_t> input0Values =
+    {
+        1, 1, 1, 1, 7, 8, 9, 9,
+        3, 3, 3, 3, 4, 4, 4, 4
+
+    };
+
+    std::vector<uint8_t> input1Values =
+    {
+        2, 2, 2, 2, 3, 3, 3, 3,
+        4, 4, 4, 4, 5, 5, 5, 5
+    };
+
+    std::vector<uint8_t> expectedOutputValues =
+    {
+        2, 2, 2, 2, 7, 8, 9, 9,
+        4, 4, 4, 4, 5, 5, 5, 5
+    };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_MAXIMUM,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 1.0f, 0);
+}
+
+void MinFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        1.f, 1.f, 5.f, 1.f, 2.f, 2.f, 7.f, 2.f,
+        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
+
+    };
+
+    std::vector<float> input1Values =
+    {
+        2.f, 2.f, 2.f, 2.f, 3.f, 3.f, 3.f, 3.f,
+        1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        1.f, 1.f, 2.f, 1.f, 2.f, 2.f, 3.f, 2.f,
+        1.f, 1.f, 1.f, 1.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MINIMUM,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MinBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 2 };
+
+    std::vector<float> input0Values = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
+
+    std::vector<float> input1Values = { 4.f };
+
+    std::vector<float> expectedOutputValues = { 1.f, 2.f, 3.f, 4.f, 4.f, 4.f, 4.f, 4.f };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MINIMUM,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MinUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<uint8_t> input0Values =
+    {
+        1, 1, 1, 1, 7, 8, 9, 9,
+        3, 3, 3, 3, 4, 4, 4, 4
+
+    };
+
+    std::vector<uint8_t> input1Values =
+    {
+        2, 2, 2, 2, 3, 3, 3, 3,
+        4, 4, 4, 4, 5, 5, 5, 5
+    };
+
+    std::vector<uint8_t> expectedOutputValues =
+    {
+        1, 1, 1, 1, 3, 3, 3, 3,
+        3, 3, 3, 3, 4, 4, 4, 4
+    };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_MINIMUM,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 1.0f, 0);
+}
+
+void MulFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        1.f, 1.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f,
+        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
+
+    };
+
+    std::vector<float> input1Values =
+    {
+        2.f, 2.f, 2.f, 2.f, 3.f, 3.f, 3.f, 3.f,
+        4.f, 4.f, 4.f, 4.f, 5.f, 5.f, 5.f, 5.f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        2.f, 2.f, 2.f, 2.f, 6.f, 6.f, 6.f, 6.f,
+        12.f, 12.f, 12.f, 12.f, 20.f, 20.f, 20.f, 20.f
+    };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MUL,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MulBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 2 };
+
+    std::vector<float> input0Values = { 2, 4, 6, 8, 10, 12, 14, 16 };
+    std::vector<float> input1Values = { 2 };
+    std::vector<float> expectedOutputValues = { 4, 8, 12, 16, 20, 24, 28, 32 };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MUL,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MulUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<uint8_t> input0Values =
+    {
+        1, 2, 3, 4, 5, 6,
+        7, 8, 9, 10, 11, 12
+
+    };
+
+    std::vector<uint8_t> input1Values = { 1, 2, 3 };
+
+    std::vector<uint8_t> expectedOutputValues =
+    {
+        1, 4, 9, 4, 10, 18,
+        7, 16, 27, 10, 22, 36
+    };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_MUL,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 1.0f, 0);
+}
+
+void MulActivationTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<float> input0Values { 4.0f, 0.0f, 1.0f, 0.5f };
+    std::vector<float> input1Values { -2.0f, -1.2f, 2.5f, 2.0f };
+    std::vector<float> expectedOutputValues { 0.0f, 0.0f, 2.5f, 1.0f };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MUL,
+                                 tflite::ActivationFunctionType_RELU,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void SubFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 1, 1, 2, 2 };
+
+    std::vector<float> input0Values = { 1, 3, 3, -7 };
+    std::vector<float> input1Values = { 1, -1, 0, -2 };
+    std::vector<float> expectedOutputValues = { 0, 4, 3, -5 };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_SUB,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void SubBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 1, 2, 2 };
+
+    std::vector<float> input0Values = { 2, 3, 4, 5 };
+    std::vector<float> input1Values = { 10 };
+    std::vector<float> expectedOutputValues = { -8, -7, -6, -5 };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_SUB,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void SubUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 1, 2, 2 };
+
+    std::vector<uint8_t> input0Values = { 10, 12, 14, 16 };
+    std::vector<uint8_t> input1Values = { 2 };
+    std::vector<uint8_t> expectedOutputValues = { 8, 10, 12, 14 };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_SUB,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 1.0f, 0);
+}
+
+TEST_SUITE("ElementwiseBinaryTest")
+{
+
+TEST_CASE ("ADD_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    AddFP32Test(backends);
+}
+
+TEST_CASE ("ADD_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    AddFP32Test(backends);
+}
+
+TEST_CASE ("ADD_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    AddBroadcastTest(backends);
+}
+
+TEST_CASE ("ADD_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    AddBroadcastTest(backends);
+}
+
+TEST_CASE ("ADD_Activation_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    AddActivationTest(backends);
+}
+
+TEST_CASE ("ADD_Activation_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    AddActivationTest(backends);
+}
+
+TEST_CASE ("ADD_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    AddUint8Test(backends);
+}
+
+TEST_CASE ("ADD_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    AddUint8Test(backends);
+}
+
+TEST_CASE ("DIV_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    DivFP32Test(backends);
+}
+
+TEST_CASE ("DIV_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    DivFP32Test(backends);
+}
+
+TEST_CASE ("DIV_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    DivBroadcastTest(backends);
+}
+
+TEST_CASE ("DIV_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    DivBroadcastTest(backends);
+}
+
+TEST_CASE ("DIV_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    DivUint8Test(backends);
+}
+
+TEST_CASE ("DIV_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    DivUint8Test(backends);
+}
+
+TEST_CASE ("MAX_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxFP32Test(backends);
+}
+
+TEST_CASE ("MAX_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxFP32Test(backends);
+}
+
+TEST_CASE ("MAX_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxBroadcastTest(backends);
+}
+
+TEST_CASE ("MAX_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxBroadcastTest(backends);
+}
+
+TEST_CASE ("MAX_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxUint8Test(backends);
+}
+
("MAX_UINT8_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + MaxUint8Test(backends); +} + +TEST_CASE ("MIN_FP32_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + MinFP32Test(backends); +} + +TEST_CASE ("MIN_FP32_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + MinFP32Test(backends); +} + +TEST_CASE ("MIN_Broadcast_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + MinBroadcastTest(backends); +} + +TEST_CASE ("MIN_Broadcast_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + MinBroadcastTest(backends); +} + +TEST_CASE ("MIN_UINT8_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + MinUint8Test(backends); +} + +TEST_CASE ("MIN_UINT8_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + MinUint8Test(backends); +} + +TEST_CASE ("MUL_FP32_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + MulFP32Test(backends); +} + +TEST_CASE ("MUL_FP32_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + MulFP32Test(backends); +} + +TEST_CASE ("MUL_Broadcast_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + MulBroadcastTest(backends); +} + +TEST_CASE ("MUL_Broadcast_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + MulBroadcastTest(backends); +} + +TEST_CASE ("MUL_Activation_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + MulActivationTest(backends); +} + +TEST_CASE ("MUL_Actiation_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + MulActivationTest(backends); +} + +TEST_CASE ("MUL_UINT8_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + MulUint8Test(backends); +} + +TEST_CASE ("MUL_UINT8_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + MulUint8Test(backends); +} + +TEST_CASE ("SUB_FP32_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + SubFP32Test(backends); +} + +TEST_CASE ("SUB_FP32_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + SubFP32Test(backends); +} + +TEST_CASE ("SUB_Broadcast_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + SubBroadcastTest(backends); +} + +TEST_CASE ("SUB_Broadcast_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + SubBroadcastTest(backends); +} + +TEST_CASE ("SUB_UINT8_GpuAcc_Test") +{ + std::vector backends = { armnn::Compute::GpuAcc, + armnn::Compute::CpuRef }; + SubUint8Test(backends); +} + +TEST_CASE ("SUB_UINT8_CpuAcc_Test") +{ + std::vector backends = { armnn::Compute::CpuAcc, + armnn::Compute::CpuRef }; + SubUint8Test(backends); +} + +} // End of TEST_SUITE("ElementwiseBinaryTest") + } // namespace armnnDelegate \ No newline at end of file diff --git a/delegate/src/test/ElementwiseBinaryTestHelper.hpp b/delegate/src/test/ElementwiseBinaryTestHelper.hpp index 72f9f850c8..60092726a2 100644 --- a/delegate/src/test/ElementwiseBinaryTestHelper.hpp +++ 
diff --git a/delegate/src/test/ElementwiseBinaryTestHelper.hpp b/delegate/src/test/ElementwiseBinaryTestHelper.hpp
index 72f9f850c8..60092726a2 100644
--- a/delegate/src/test/ElementwiseBinaryTestHelper.hpp
+++ b/delegate/src/test/ElementwiseBinaryTestHelper.hpp
@@ -24,7 +24,9 @@ std::vector<char> CreateElementwiseBinaryTfLiteModel(tflite::BuiltinOperator bin
                                                      tflite::TensorType tensorType,
                                                      const std::vector <int32_t>& input0TensorShape,
                                                      const std::vector <int32_t>& input1TensorShape,
-                                                     const std::vector <int32_t>& outputTensorShape)
+                                                     const std::vector <int32_t>& outputTensorShape,
+                                                     float quantScale = 1.0f,
+                                                     int quantOffset  = 0)
 {
     using namespace tflite;
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
@@ -32,19 +34,36 @@ std::vector<char> CreateElementwiseBinaryTfLiteModel(tflite::BuiltinOperator bin
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
     buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
 
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+
     std::array<flatbuffers::Offset<Tensor>, 3> tensors;
     tensors[0] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
                                                                       input0TensorShape.size()),
-                              tensorType, 0);
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input_0"),
+                              quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
                                                                       input1TensorShape.size()),
-                              tensorType, 0);
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input_1"),
+                              quantizationParameters);
     tensors[2] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
-                              tensorType);
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
 
     // create operator
     tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
@@ -63,6 +82,18 @@ std::vector<char> CreateElementwiseBinaryTfLiteModel(tflite::BuiltinOperator bin
             operatorBuiltinOptions = CreateDivOptions(flatBufferBuilder, activationType).Union();
             break;
         }
+        case BuiltinOperator_MAXIMUM:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_MaximumMinimumOptions;
+            operatorBuiltinOptions = CreateMaximumMinimumOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_MINIMUM:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_MaximumMinimumOptions;
+            operatorBuiltinOptions = CreateMaximumMinimumOptions(flatBufferBuilder).Union();
+            break;
+        }
         case BuiltinOperator_MUL:
         {
             operatorBuiltinOptionsType = BuiltinOptions_MulOptions;
@@ -115,23 +146,29 @@ std::vector<char> CreateElementwiseBinaryTfLiteModel(tflite::BuiltinOperator bin
                        flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
 
-void ElementwiseBinaryFP32Test(tflite::BuiltinOperator binaryOperatorCode,
-                               tflite::ActivationFunctionType activationType,
-                               std::vector<armnn::BackendId>& backends,
-                               std::vector<int32_t>& input0Shape,
-                               std::vector<int32_t>& input1Shape,
-                               std::vector<int32_t>& outputShape,
-                               std::vector<float>& input0Values,
-                               std::vector<float>& input1Values,
-                               std::vector<float>& expectedOutputValues)
+template <typename T>
+void ElementwiseBinaryTest(tflite::BuiltinOperator binaryOperatorCode,
+                           tflite::ActivationFunctionType activationType,
+                           tflite::TensorType tensorType,
+                           std::vector<armnn::BackendId>& backends,
+                           std::vector<int32_t>& input0Shape,
+                           std::vector<int32_t>& input1Shape,
+                           std::vector<int32_t>& outputShape,
+                           std::vector<T>& input0Values,
+                           std::vector<T>& input1Values,
+                           std::vector<T>& expectedOutputValues,
+                           float quantScale = 1.0f,
+                           int quantOffset = 0)
 {
     using namespace tflite;
     std::vector<char> modelBuffer = CreateElementwiseBinaryTfLiteModel(binaryOperatorCode,
                                                                        activationType,
-                                                                       ::tflite::TensorType_FLOAT32,
-                                                                       input0Shape,
-                                                                       input1Shape,
-                                                                       outputShape);
+                                                                       tensorType,
+                                                                       input0Shape,
+                                                                       input1Shape,
+                                                                       outputShape,
+                                                                       quantScale,
+                                                                       quantOffset);
 
     const Model* tfLiteModel = GetModel(modelBuffer.data());
 
     // Create TfLite Interpreters
@@ -158,28 +195,28 @@ void ElementwiseBinaryFP32Test(tflite::BuiltinOperator binaryOperatorCode,
 
     // Set input data
     auto tfLiteDelegateInput0Id = tfLiteInterpreter->inputs()[0];
-    auto tfLiteDelageInput0Data = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInput0Id);
+    auto tfLiteDelageInput0Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput0Id);
     for (unsigned int i = 0; i < input0Values.size(); ++i)
     {
         tfLiteDelageInput0Data[i] = input0Values[i];
     }
 
     auto tfLiteDelegateInput1Id = tfLiteInterpreter->inputs()[1];
-    auto tfLiteDelageInput1Data = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInput1Id);
+    auto tfLiteDelageInput1Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput1Id);
     for (unsigned int i = 0; i < input1Values.size(); ++i)
    {
         tfLiteDelageInput1Data[i] = input1Values[i];
     }
 
     auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInput0Id);
+    auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput0Id);
     for (unsigned int i = 0; i < input0Values.size(); ++i)
     {
         armnnDelegateInput0Data[i] = input0Values[i];
     }
 
     auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
-    auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInput1Id);
+    auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput1Id);
     for (unsigned int i = 0; i < input1Values.size(); ++i)
     {
         armnnDelegateInput1Data[i] = input1Values[i];
@@ -191,9 +228,9 @@ void ElementwiseBinaryFP32Test(tflite::BuiltinOperator binaryOperatorCode,
 
     // Compare output data
     auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+    auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
     auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
     for (size_t i = 0; i < expectedOutputValues.size(); i++)
     {
         CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
@@ -204,8 +241,4 @@ void ElementwiseBinaryFP32Test(tflite::BuiltinOperator binaryOperatorCode,
     armnnDelegateInterpreter.reset(nullptr);
 }
 
-} // anonymous namespace
-
-
-
-
+} // anonymous namespace
\ No newline at end of file
-- 
cgit v1.2.1
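
For context, ElementwiseBinaryTest runs the generated model twice — once on the stock TfLite interpreter and once with the ArmNN delegate attached — and compares the two outputs element by element. Reduced to its core, the delegate hook-up looks roughly like this; a sketch following the pattern of the delegate's test setup, with exact headers and error handling simplified:

    #include <memory>
    #include <vector>

    #include <armnn_delegate.hpp>
    #include <tensorflow/lite/interpreter.h>
    #include <tensorflow/lite/kernels/register.h>
    #include <tensorflow/lite/model.h>

    // Build an interpreter for the given model and hand supported operators
    // (now including DIV/MAXIMUM/MINIMUM/MUL/SUB) over to the ArmNN delegate.
    std::unique_ptr<tflite::Interpreter> BuildDelegatedInterpreter(const tflite::Model* model,
                                                                   std::vector<armnn::BackendId>& backends)
    {
        std::unique_ptr<tflite::Interpreter> interpreter;
        tflite::ops::builtin::BuiltinOpResolver resolver;
        tflite::InterpreterBuilder(model, resolver)(&interpreter);
        interpreter->AllocateTensors();

        // Create the ArmNN delegate and attach it; TfLite then partitions the
        // graph and routes the delegated subgraphs through ArmNN.
        armnnDelegate::DelegateOptions delegateOptions(backends);
        std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
            armnnDelegatePtr(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                             armnnDelegate::TfLiteArmnnDelegateDelete);
        interpreter->ModifyGraphWithDelegate(std::move(armnnDelegatePtr));
        return interpreter;
    }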