From f7ac72c85c90c61be14fff16c9c2ff638fa32c40 Mon Sep 17 00:00:00 2001
From: Sadik Armagan <sadik.armagan@arm.com>
Date: Wed, 5 May 2021 15:03:50 +0100
Subject: IVGCVSW-5418 'ExecuteNetwork test for MobileBERT'

* Refactored the code for checking constant inputs.
* Added a unit test for ADD operator with constant input.

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ie7207e5a1ce77ea305552859de32a66e07c68a6f
---
 delegate/src/DelegateUtils.hpp                    | 40 ++++++++++
 delegate/src/ElementwiseBinary.hpp                | 31 ++------
 delegate/src/Fill.hpp                             | 17 ++--
 delegate/src/Gather.hpp                           | 10 ++-
 delegate/src/LogicalBinary.hpp                    | 31 ++------
 delegate/src/Quantization.hpp                     |  9 +++
 delegate/src/test/ElementwiseBinaryTest.cpp       | 55 +++++++++++++
 delegate/src/test/ElementwiseBinaryTestHelper.hpp | 96 +++++++++++------------
 8 files changed, 175 insertions(+), 114 deletions(-)

diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 76d21f6332..fa38ee3e90 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -555,6 +555,46 @@ TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
     return kTfLiteOk;
 }
 
+TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
+                           armnnDelegate::DelegateData& delegateData,
+                           TfLiteContext* tfLiteContext,
+                           TfLiteNode* tfLiteNode)
+{
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    // Process input tensors
+    // If input tensor is a Constant tensor create a constant layer and connect it to the network
+    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
+    {
+        const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[inputIndex]];
+        if(tflite::IsConstantTensor(&tfLiteInputTensor))
+        {
+            armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+            bool isSupported = false;
+            FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                       tfLiteContext,
+                                       IsConstantSupported,
+                                       delegateData.m_Backends,
+                                       isSupported,
+                                       inputTensorInfo);
+            if (!isSupported)
+            {
+                return kTfLiteError;
+            }
+            auto constantInput = CreateConstTensor(&tfLiteInputTensor,
+                                                   inputTensorInfo,
+                                                   armnn::Optional<armnn::PermutationVector&>());
+            armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
+            armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
+            outputSlot.SetTensorInfo(inputTensorInfo);
+
+            delegateData.m_OutputSlotForNode[static_cast<unsigned long>(inputIndex)] = &outputSlot;
+
+        }
+
+    }
+    return kTfLiteOk;
+}
+
 unsigned int ComputeWrappedIndex(int index, unsigned int numDimensions)
 {
     int numDims = armnn::numeric_cast<int>(numDimensions);
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index 49a5dfb0d9..58d7aca0ee 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -273,32 +273,13 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
     armnn::IOutputSlot& outputSlot = elementwiseBinaryLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
-    if(tflite::IsConstantTensor(&tfLiteInputTensor0))
+    auto inputsTensorsProcess = ProcessInputs(elementwiseBinaryLayer,
+                                              delegateData,
+                                              tfLiteContext,
+                                              tfLiteNode);
+    if (inputsTensorsProcess == kTfLiteError)
     {
-        auto status = ConnectConstant(elementwiseBinaryLayer,
-                                      inputTensorInfo0,
-                                      tfLiteContext,
-                                      tfLiteInputTensor0,
-                                      delegateData,
-                                      tfLiteNode->inputs->data[0]);
-        if (status == kTfLiteError)
-        {
-            return status;
-        }
-    }
-
-    if(tflite::IsConstantTensor(&tfLiteInputTensor1))
-    {
-        auto status = ConnectConstant(elementwiseBinaryLayer,
-                                      inputTensorInfo1,
-                                      tfLiteContext,
-                                      tfLiteInputTensor1,
-                                      delegateData,
-                                      tfLiteNode->inputs->data[1]);
-        if (status == kTfLiteError)
-        {
-            return status;
-        }
+        return inputsTensorsProcess;
     }
 
     auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
diff --git a/delegate/src/Fill.hpp b/delegate/src/Fill.hpp
index 23f332fed0..c6f94dd83c 100644
--- a/delegate/src/Fill.hpp
+++ b/delegate/src/Fill.hpp
@@ -96,18 +96,13 @@ TfLiteStatus VisitFillOperator(DelegateData& delegateData,
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
-    if(tflite::IsConstantTensor(&tfLiteInputTensor))
+    auto inputsTensorsProcess = ProcessInputs(layer,
+                                              delegateData,
+                                              tfLiteContext,
+                                              tfLiteNode);
+    if (inputsTensorsProcess == kTfLiteError)
     {
-        auto status = ConnectConstant(layer,
-                                      inputTensorInfo,
-                                      tfLiteContext,
-                                      tfLiteInputTensor,
-                                      delegateData,
-                                      tfLiteNode->inputs->data[0]);
-        if (status == kTfLiteError)
-        {
-            return status;
-        }
+        return inputsTensorsProcess;
     }
 
     return Connect(layer, tfLiteNode, delegateData);
diff --git a/delegate/src/Gather.hpp b/delegate/src/Gather.hpp
index 9ed0fe15c1..634373a341 100644
--- a/delegate/src/Gather.hpp
+++ b/delegate/src/Gather.hpp
@@ -87,9 +87,17 @@ TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
 
     armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor);
     ARMNN_ASSERT(layer != nullptr);
-
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
+    auto inputsTensorsProcess = ProcessInputs(layer,
+                                              delegateData,
+                                              tfLiteContext,
+                                              tfLiteNode);
+    if (inputsTensorsProcess == kTfLiteError)
+    {
+        return inputsTensorsProcess;
+    }
+
     Connect(layer, tfLiteNode, delegateData);
 
     return kTfLiteOk;
diff --git a/delegate/src/LogicalBinary.hpp b/delegate/src/LogicalBinary.hpp
index 07b55c3e32..d877585849 100644
--- a/delegate/src/LogicalBinary.hpp
+++ b/delegate/src/LogicalBinary.hpp
@@ -77,32 +77,13 @@ TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
     armnn::IOutputSlot& outputSlot = logicalBinaryLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
-    if(tflite::IsConstantTensor(&tfLiteInputTensor0))
+    auto inputsTensorsProcess = ProcessInputs(logicalBinaryLayer,
+                                              delegateData,
+                                              tfLiteContext,
+                                              tfLiteNode);
+    if (inputsTensorsProcess == kTfLiteError)
     {
-        auto status = ConnectConstant(logicalBinaryLayer,
-                                      inputTensorInfo0,
-                                      tfLiteContext,
-                                      tfLiteInputTensor0,
-                                      delegateData,
-                                      tfLiteNode->inputs->data[0]);
-        if (status == kTfLiteError)
-        {
-            return status;
-        }
-    }
-
-    if(tflite::IsConstantTensor(&tfLiteInputTensor1))
-    {
-        auto status = ConnectConstant(logicalBinaryLayer,
-                                      inputTensorInfo1,
-                                      tfLiteContext,
-                                      tfLiteInputTensor1,
-                                      delegateData,
-                                      tfLiteNode->inputs->data[1]);
-        if (status == kTfLiteError)
-        {
-            return status;
-        }
+        return inputsTensorsProcess;
     }
 
     // LogicalBinary operators support broadcasting
diff --git a/delegate/src/Quantization.hpp b/delegate/src/Quantization.hpp
index 565f1e37c0..3c274c6ff5 100644
--- a/delegate/src/Quantization.hpp
+++ b/delegate/src/Quantization.hpp
@@ -73,6 +73,15 @@ TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
     armnn::IOutputSlot& outputSlot = dequantizeLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
+    auto inputsTensorsProcess = ProcessInputs(dequantizeLayer,
+                                              delegateData,
+                                              tfLiteContext,
+                                              tfLiteNode);
+    if (inputsTensorsProcess == kTfLiteError)
+    {
+        return inputsTensorsProcess;
+    }
+
     return Connect(dequantizeLayer, tfLiteNode, delegateData);
 }
 
diff --git a/delegate/src/test/ElementwiseBinaryTest.cpp b/delegate/src/test/ElementwiseBinaryTest.cpp
index cc447d9fc3..448b3e6fd9 100644
--- a/delegate/src/test/ElementwiseBinaryTest.cpp
+++ b/delegate/src/test/ElementwiseBinaryTest.cpp
@@ -129,6 +129,55 @@ void AddBroadcastTest(std::vector<armnn::BackendId>& backends)
                                  expectedOutputValues);
 }
 
+void AddConstInputTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 3, 2, 1 };
+    std::vector<int32_t> input1Shape { 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 3, 2, 1 };
+
+    std::vector<float> input0Values
+    {
+        0.0f,
+        1.0f,
+
+        2.0f,
+        3.0f,
+
+        4.0f,
+        5.0f,
+    };
+    std::vector<float> input1Values
+    {
+        0.5f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        0.5f,
+        1.5f,
+
+        2.5f,
+        3.5f,
+
+        4.5f,
+        5.5f,
+    };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues,
+                                 1.0f,
+                                 0,
+                                 true);
+}
+
 void AddActivationTest(std::vector<armnn::BackendId>& backends)
 {
     std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
@@ -913,6 +962,12 @@ TEST_CASE ("ADD_Broadcast_CpuRef_Test")
     AddBroadcastTest(backends);
 }
 
+TEST_CASE ("ADD_Constant_Input_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AddConstInputTest(backends);
+}
+
 TEST_CASE ("ADD_Actiation_CpuRef_Test")
 {
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
diff --git a/delegate/src/test/ElementwiseBinaryTestHelper.hpp b/delegate/src/test/ElementwiseBinaryTestHelper.hpp
index 0c096d85c3..13b336e91e 100644
--- a/delegate/src/test/ElementwiseBinaryTestHelper.hpp
+++ b/delegate/src/test/ElementwiseBinaryTestHelper.hpp
@@ -5,6 +5,8 @@
 
 #pragma once
 
+#include "TestUtils.hpp"
+
 #include <armnn_delegate.hpp>
 
 #include <flatbuffers/flatbuffers.h>
@@ -19,12 +21,15 @@
 namespace
 {
 
+template <typename T>
 std::vector<char> CreateElementwiseBinaryTfLiteModel(tflite::BuiltinOperator binaryOperatorCode,
                                                      tflite::ActivationFunctionType activationType,
                                                      tflite::TensorType tensorType,
                                                      const std::vector <int32_t>& input0TensorShape,
                                                      const std::vector <int32_t>& input1TensorShape,
                                                      const std::vector <int32_t>& outputTensorShape,
+                                                     std::vector<T>& input1Values,
+                                                     bool constantInput = false,
                                                      float quantScale = 1.0f,
                                                      int quantOffset = 0)
 {
@@ -33,6 +38,18 @@
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
     buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    if (constantInput)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(input1Values.data()),
+                                                        sizeof(T) * input1Values.size())));
+    }
+    else
+    {
+        buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    }
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
 
     auto quantizationParameters =
         CreateQuantizationParameters(flatBufferBuilder,
@@ -54,14 +71,14 @@
                               flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
                                                                       input1TensorShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input_1"),
                               quantizationParameters);
     tensors[2] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
                               tensorType,
-                              0,
+                              2,
                               flatBufferBuilder.CreateString("output"),
                               quantizationParameters);
 
@@ -158,27 +175,30 @@
                            std::vector<T>& input1Values,
                            std::vector<T>& expectedOutputValues,
                            float quantScale = 1.0f,
-                           int quantOffset = 0)
+                           int quantOffset = 0,
+                           bool constantInput = false)
 {
     using namespace tflite;
     std::vector<char> modelBuffer = CreateElementwiseBinaryTfLiteModel(binaryOperatorCode,
                                                                        activationType,
                                                                        tensorType,
                                                                        input0Shape,
                                                                        input1Shape,
                                                                        outputShape,
+                                                                       input1Values,
+                                                                       constantInput,
                                                                        quantScale,
                                                                        quantOffset);
     const Model* tfLiteModel = GetModel(modelBuffer.data());
     // Create TfLite Interpreters
     std::unique_ptr<Interpreter> armnnDelegateInterpreter;
     CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
               (&armnnDelegateInterpreter) == kTfLiteOk);
     CHECK(armnnDelegateInterpreter != nullptr);
     CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
 
     std::unique_ptr<Interpreter> tfLiteInterpreter;
     CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
               (&tfLiteInterpreter) == kTfLiteOk);
     CHECK(tfLiteInterpreter != nullptr);
     CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
@@ -187,57 +207,29 @@
     // Create the ArmNN Delegate
     armnnDelegate::DelegateOptions delegateOptions(backends);
     std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
+                    theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                                     armnnDelegate::TfLiteArmnnDelegateDelete);
     CHECK(theArmnnDelegate != nullptr);
     // Modify armnnDelegateInterpreter to use armnnDelegate
     CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
 
     // Set input data
-    auto tfLiteDelegateInput0Id = tfLiteInterpreter->inputs()[0];
-    auto tfLiteDelageInput0Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput0Id);
-    for (unsigned int i = 0; i < input0Values.size(); ++i)
-    {
-        tfLiteDelageInput0Data[i] = input0Values[i];
-    }
-
-    auto tfLiteDelegateInput1Id = tfLiteInterpreter->inputs()[1];
-    auto tfLiteDelageInput1Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput1Id);
-    for (unsigned int i = 0; i < input1Values.size(); ++i)
-    {
-        tfLiteDelageInput1Data[i] = input1Values[i];
-    }
-
-    auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput0Id);
-    for (unsigned int i = 0; i < input0Values.size(); ++i)
+    armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values);
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values);
+    if (!constantInput)
     {
-        armnnDelegateInput0Data[i] = input0Values[i];
+        armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values);
+        armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values);
     }
-
-    auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
-    auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput1Id);
-    for (unsigned int i = 0; i < input1Values.size(); ++i)
-    {
-        armnnDelegateInput1Data[i] = input1Values[i];
-    }
-
     // Run EnqueWorkload
     CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
     CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
 
     // Compare output data
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
-    for (size_t i = 0; i < expectedOutputValues.size(); i++)
-    {
-        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
-        CHECK(tfLiteDelageOutputData[i] == expectedOutputValues[i]);
-        CHECK(tfLiteDelageOutputData[i] == armnnDelegateOutputData[i]);
-    }
-
+    armnnDelegate::CompareOutputData(tfLiteInterpreter,
+                                     armnnDelegateInterpreter,
+                                     outputShape,
+                                     expectedOutputValues);
     armnnDelegateInterpreter.reset(nullptr);
 }
 
-- 
cgit v1.2.1
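
A note on the pattern (not part of the patch): every visit function above now delegates constant-input handling to the new ProcessInputs() helper instead of repeating per-input IsConstantTensor()/ConnectConstant() blocks. The sketch below is illustrative only; VisitSomeOperator is a hypothetical visitor invented for this note, and it assumes the arguments and helpers (ProcessInputs, Connect) that the real Visit*Operator functions in delegate/src already have in scope.

    // Hypothetical visitor showing the calling pattern this patch converges on.
    TfLiteStatus VisitSomeOperator(armnnDelegate::DelegateData& delegateData,
                                   TfLiteContext* tfLiteContext,
                                   TfLiteNode* tfLiteNode,
                                   armnn::IConnectableLayer* layer,
                                   const armnn::TensorInfo& outputTensorInfo)
    {
        layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

        // For each TfLite input tensor that IsConstantTensor() reports as
        // constant, ProcessInputs() checks IsConstantSupported on the active
        // backends, adds an Arm NN ConstantLayer, and caches its output slot
        // in delegateData.m_OutputSlotForNode so that Connect() can wire the
        // constant to the consuming layer like any runtime input.
        auto inputsTensorsProcess = ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode);
        if (inputsTensorsProcess == kTfLiteError)
        {
            return inputsTensorsProcess;
        }

        return Connect(layer, tfLiteNode, delegateData);
    }

On the test side, the buffer-index edits in CreateElementwiseBinaryTfLiteModel follow from the TfLite FlatBuffer convention that buffers[0] is an empty sentinel: once input_1 may carry constant data it needs its own buffer entry (index 1), with the output taking index 2, which is why those tensor buffer fields change from 0.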