//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

#include <armnn/Types.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <initializer_list>
#include <iterator>
#include <vector>

namespace
{

template <typename T>
std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(
    tflite::TensorType tensorType,
    int32_t batchSize,
    int32_t timeSize,
    int32_t inputSize,
    int32_t outputSize,
    int32_t numUnits,
    bool hasInputToInputWeights,
    const std::vector<T>& inputToInputWeights,
    const std::vector<T>& inputToForgetWeights,
    const std::vector<T>& inputToCellWeights,
    const std::vector<T>& inputToOutputWeights,
    bool hasRecurrentToInputWeights,
    const std::vector<T>& recurrentToInputWeights,
    const std::vector<T>& recurrentToForgetWeights,
    const std::vector<T>& recurrentToCellWeights,
    const std::vector<T>& recurrentToOutputWeights,
    bool hasCellToInputWeights,
    const std::vector<T>& cellToInputWeights,
    bool hasCellToForgetWeights,
    const std::vector<T>& cellToForgetWeights,
    bool hasCellToOutputWeights,
    const std::vector<T>& cellToOutputWeights,
    bool hasInputGateBias,
    const std::vector<float>& inputGateBias,
    const std::vector<float>& forgetGateBias,
    const std::vector<float>& cellBias,
    const std::vector<float>& outputGateBias,
    bool hasProjectionWeights,
    const std::vector<T>& projectionWeights,
    bool hasProjectionBias,
    const std::vector<float>& projectionBias,
    bool hasInputLayerNormWeights,
    const std::vector<float>& inputLayerNormWeights,
    bool hasForgetLayerNormWeights,
    const std::vector<float>& forgetLayerNormWeights,
    bool hasCellLayerNormWeights,
    const std::vector<float>& cellLayerNormWeights,
    bool hasOutputLayerNormWeights,
    const std::vector<float>& outputLayerNormWeights,
    tflite::ActivationFunctionType activationFunction,
    float clippingThresCell,
    float clippingThresProj,
    bool isTimeMajor,
    float quantScale,
    int quantOffset = 0)
{
    std::vector<int32_t> tensorInfo0{};
    std::vector<int32_t> tensorInfoNumUnits{numUnits};
    std::vector<int32_t> tensorInfoInputSize{numUnits, inputSize};
    std::vector<int32_t> tensorInfoOutputSize{numUnits, outputSize};

    std::vector<int32_t> inputShape;
    std::vector<int32_t> outputShape;
    if (isTimeMajor)
    {
        inputShape  = {timeSize, batchSize, inputSize};
        outputShape = {timeSize, batchSize, outputSize};
    }
    else
    {
        inputShape  = {batchSize, timeSize, inputSize};
        outputShape = {batchSize, timeSize, outputSize};
    }
    std::vector<int32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<int32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<int32_t> projectionWeightDimensions{outputSize, numUnits};
    std::vector<int32_t> projectionBiasDimensions{outputSize};

    std::vector<int32_t> operatorInputs;

    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    std::vector<flatbuffers::Offset<Tensor>> tensors;

    auto quantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ 1.0f }),
                                     flatBufferBuilder.CreateVector<int64_t>({ 0 }));

    auto weightQuantizationParameters =
        CreateQuantizationParameters(flatBufferBuilder,
                                     0,
                                     0,
                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
    tensors.push_back(CreateTensor(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector<int32_t>(inputShape.data(), inputShape.size()),
        ::tflite::TensorType_FLOAT32,
        buffers.size() - 1,
        flatBufferBuilder.CreateString("input_0")));
    operatorInputs.push_back(buffers.size() - 1);

    if (hasInputToInputWeights)
    {
        buffers.push_back(CreateBuffer(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputToInputWeights.data()),
                                           sizeof(T) * inputToInputWeights.size())));
        tensors.push_back(CreateTensor(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(), tensorInfoInputSize.size()),
            tensorType,
            buffers.size() - 1,
            flatBufferBuilder.CreateString("inputToInputWeights"),
            weightQuantizationParameters));
        operatorInputs.push_back(buffers.size() - 1);
    }
    else
    {
        operatorInputs.push_back(kTfLiteOptionalTensor);
    }
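    // Every optional LSTM input follows the pattern above: when the input is present,
    // a constant buffer and its tensor are appended and the tensor index is recorded;
    // when it is absent, kTfLiteOptionalTensor (-1) is pushed instead so the runtime
    // treats that operator input as omitted.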
    buffers.push_back(CreateBuffer(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputToForgetWeights.data()),
                                       sizeof(T) * inputToForgetWeights.size())));
    tensors.push_back(CreateTensor(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(), tensorInfoInputSize.size()),
        tensorType,
        buffers.size() - 1,
        flatBufferBuilder.CreateString("inputToForgetWeights"),
        weightQuantizationParameters));
    operatorInputs.push_back(buffers.size() - 1);

    buffers.push_back(CreateBuffer(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputToCellWeights.data()),
                                       sizeof(T) * inputToCellWeights.size())));
    tensors.push_back(CreateTensor(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(), tensorInfoInputSize.size()),
        tensorType,
        buffers.size() - 1,
        flatBufferBuilder.CreateString("inputToCellWeights"),
        weightQuantizationParameters));
    operatorInputs.push_back(buffers.size() - 1);

    buffers.push_back(CreateBuffer(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputToOutputWeights.data()),
                                       sizeof(T) * inputToOutputWeights.size())));
    tensors.push_back(CreateTensor(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(), tensorInfoInputSize.size()),
        tensorType,
        buffers.size() - 1,
        flatBufferBuilder.CreateString("inputToOutputWeights"),
        weightQuantizationParameters));
    operatorInputs.push_back(buffers.size() - 1);

    if (hasRecurrentToInputWeights)
    {
        buffers.push_back(CreateBuffer(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(recurrentToInputWeights.data()),
                                           sizeof(T) * recurrentToInputWeights.size())));
        tensors.push_back(CreateTensor(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(), tensorInfoOutputSize.size()),
            tensorType,
            buffers.size() - 1,
            flatBufferBuilder.CreateString("recurrentToInputWeights"),
            weightQuantizationParameters));
        operatorInputs.push_back(buffers.size() - 1);
    }
    else
    {
        operatorInputs.push_back(kTfLiteOptionalTensor);
    }

    buffers.push_back(CreateBuffer(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(recurrentToForgetWeights.data()),
                                       sizeof(T) * recurrentToForgetWeights.size())));
    tensors.push_back(CreateTensor(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(), tensorInfoOutputSize.size()),
        tensorType,
        buffers.size() - 1,
        flatBufferBuilder.CreateString("recurrentToForgetWeights"),
        weightQuantizationParameters));
    operatorInputs.push_back(buffers.size() - 1);

    buffers.push_back(CreateBuffer(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(recurrentToCellWeights.data()),
                                       sizeof(T) * recurrentToCellWeights.size())));
    tensors.push_back(CreateTensor(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(), tensorInfoOutputSize.size()),
        tensorType,
        buffers.size() - 1,
        flatBufferBuilder.CreateString("recurrentToCellWeights"),
        weightQuantizationParameters));
    operatorInputs.push_back(buffers.size() - 1);

    buffers.push_back(CreateBuffer(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(recurrentToOutputWeights.data()),
                                       sizeof(T) * recurrentToOutputWeights.size())));
    tensors.push_back(CreateTensor(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(), tensorInfoOutputSize.size()),
        tensorType,
        buffers.size() - 1,
        flatBufferBuilder.CreateString("recurrentToOutputWeights"),
        weightQuantizationParameters));
    operatorInputs.push_back(buffers.size() - 1);
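    // Peephole connections are optional 1-D weight tensors of numUnits elements that
    // let the input, forget and output gates see the cell state directly.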
    if (hasCellToInputWeights)
    {
        buffers.push_back(CreateBuffer(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToInputWeights.data()),
                                           sizeof(T) * cellToInputWeights.size())));
        tensors.push_back(CreateTensor(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(), tensorInfoNumUnits.size()),
            tensorType,
            buffers.size() - 1,
            flatBufferBuilder.CreateString("cellToInputWeights"),
            weightQuantizationParameters));
        operatorInputs.push_back(buffers.size() - 1);
    }
    else
    {
        operatorInputs.push_back(kTfLiteOptionalTensor);
    }

    if (hasCellToForgetWeights)
    {
        buffers.push_back(CreateBuffer(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToForgetWeights.data()),
                                           sizeof(T) * cellToForgetWeights.size())));
        tensors.push_back(CreateTensor(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(), tensorInfoNumUnits.size()),
            tensorType,
            buffers.size() - 1,
            flatBufferBuilder.CreateString("cellToForgetWeights"),
            weightQuantizationParameters));
        operatorInputs.push_back(buffers.size() - 1);
    }
    else
    {
        operatorInputs.push_back(kTfLiteOptionalTensor);
    }

    if (hasCellToOutputWeights)
    {
        buffers.push_back(CreateBuffer(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToOutputWeights.data()),
                                           sizeof(T) * cellToOutputWeights.size())));
        tensors.push_back(CreateTensor(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(), tensorInfoNumUnits.size()),
            tensorType,
            buffers.size() - 1,
            flatBufferBuilder.CreateString("cellToOutputWeights"),
            weightQuantizationParameters));
        operatorInputs.push_back(buffers.size() - 1);
    }
    else
    {
        operatorInputs.push_back(kTfLiteOptionalTensor);
    }

    if (hasInputGateBias)
    {
        buffers.push_back(CreateBuffer(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputGateBias.data()),
                                           sizeof(float) * inputGateBias.size())));
        tensors.push_back(CreateTensor(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(), tensorInfoNumUnits.size()),
            ::tflite::TensorType_FLOAT32,
            buffers.size() - 1,
            flatBufferBuilder.CreateString("inputGateBias")));
        operatorInputs.push_back(buffers.size() - 1);
    }
    else
    {
        operatorInputs.push_back(kTfLiteOptionalTensor);
    }

    buffers.push_back(CreateBuffer(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(forgetGateBias.data()),
                                       sizeof(float) * forgetGateBias.size())));
    tensors.push_back(CreateTensor(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(), tensorInfoNumUnits.size()),
        ::tflite::TensorType_FLOAT32,
        buffers.size() - 1,
        flatBufferBuilder.CreateString("forgetGateBias")));
    operatorInputs.push_back(buffers.size() - 1);

    buffers.push_back(CreateBuffer(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellBias.data()),
                                       sizeof(float) * cellBias.size())));
    tensors.push_back(CreateTensor(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(), tensorInfoNumUnits.size()),
        ::tflite::TensorType_FLOAT32,
        buffers.size() - 1,
        flatBufferBuilder.CreateString("cellBias")));
    operatorInputs.push_back(buffers.size() - 1);
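    // Note that the gate bias tensors (including outputGateBias below) are always
    // serialized as FLOAT32, even when the weights use a quantized tensorType.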
    buffers.push_back(CreateBuffer(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(outputGateBias.data()),
                                       sizeof(float) * outputGateBias.size())));
    tensors.push_back(CreateTensor(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(), tensorInfoNumUnits.size()),
        ::tflite::TensorType_FLOAT32,
        buffers.size() - 1,
        flatBufferBuilder.CreateString("outputGateBias")));
    operatorInputs.push_back(buffers.size() - 1);

    if (hasProjectionWeights)
    {
        buffers.push_back(CreateBuffer(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(projectionWeights.data()),
                                           sizeof(T) * projectionWeights.size())));
        tensors.push_back(CreateTensor(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector<int32_t>(projectionWeightDimensions.data(),
                                                    projectionWeightDimensions.size()),
            tensorType,
            buffers.size() - 1,
            flatBufferBuilder.CreateString("projectionWeights"),
            weightQuantizationParameters));
        operatorInputs.push_back(buffers.size() - 1);
    }
    else
    {
        operatorInputs.push_back(kTfLiteOptionalTensor);
    }

    if (hasProjectionBias)
    {
        buffers.push_back(CreateBuffer(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(projectionBias.data()),
                                           sizeof(float) * projectionBias.size())));
        tensors.push_back(CreateTensor(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector<int32_t>(projectionBiasDimensions.data(),
                                                    projectionBiasDimensions.size()),
            ::tflite::TensorType_FLOAT32,
            buffers.size() - 1,
            flatBufferBuilder.CreateString("projectionBias")));
        operatorInputs.push_back(buffers.size() - 1);
    }
    else
    {
        operatorInputs.push_back(kTfLiteOptionalTensor);
    }

    // The state input tensors are created with is_variable = true so the interpreter
    // keeps their contents between invocations.
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
    tensors.push_back(CreateTensor(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector<int32_t>(outputStateInDimensions.data(),
                                                outputStateInDimensions.size()),
        ::tflite::TensorType_FLOAT32,
        buffers.size() - 1,
        flatBufferBuilder.CreateString("outputStateInInfo"),
        quantizationParameters,
        true));
    operatorInputs.push_back(buffers.size() - 1);

    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
    tensors.push_back(CreateTensor(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector<int32_t>(cellStateInDimensions.data(),
                                                cellStateInDimensions.size()),
        ::tflite::TensorType_FLOAT32,
        buffers.size() - 1,
        flatBufferBuilder.CreateString("cellStateInInfo"),
        quantizationParameters,
        true));
    operatorInputs.push_back(buffers.size() - 1);

    if (hasInputLayerNormWeights)
    {
        buffers.push_back(CreateBuffer(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputLayerNormWeights.data()),
                                           sizeof(float) * inputLayerNormWeights.size())));
        tensors.push_back(CreateTensor(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(), tensorInfoNumUnits.size()),
            ::tflite::TensorType_FLOAT32,
            buffers.size() - 1,
            flatBufferBuilder.CreateString("inputLayerNormWeights")));
        operatorInputs.push_back(buffers.size() - 1);
    }
    else
    {
        operatorInputs.push_back(kTfLiteOptionalTensor);
    }

    if (hasForgetLayerNormWeights)
    {
        buffers.push_back(CreateBuffer(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(forgetLayerNormWeights.data()),
                                           sizeof(float) * forgetLayerNormWeights.size())));
        tensors.push_back(CreateTensor(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(), tensorInfoNumUnits.size()),
            ::tflite::TensorType_FLOAT32,
            buffers.size() - 1,
            flatBufferBuilder.CreateString("forgetLayerNormWeights")));
        operatorInputs.push_back(buffers.size() - 1);
    }
    else
    {
        operatorInputs.push_back(kTfLiteOptionalTensor);
    }
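    // The layer-normalization weights (continued below for the cell and output gates)
    // are optional 1-D tensors of numUnits elements, used only by the layer-normalized
    // LSTM variant.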
    if (hasCellLayerNormWeights)
    {
        buffers.push_back(CreateBuffer(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellLayerNormWeights.data()),
                                           sizeof(float) * cellLayerNormWeights.size())));
        tensors.push_back(CreateTensor(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(), tensorInfoNumUnits.size()),
            ::tflite::TensorType_FLOAT32,
            buffers.size() - 1,
            flatBufferBuilder.CreateString("cellLayerNormWeights")));
        operatorInputs.push_back(buffers.size() - 1);
    }
    else
    {
        operatorInputs.push_back(kTfLiteOptionalTensor);
    }

    if (hasOutputLayerNormWeights)
    {
        buffers.push_back(CreateBuffer(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(outputLayerNormWeights.data()),
                                           sizeof(float) * outputLayerNormWeights.size())));
        tensors.push_back(CreateTensor(
            flatBufferBuilder,
            flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(), tensorInfoNumUnits.size()),
            ::tflite::TensorType_FLOAT32,
            buffers.size() - 1,
            flatBufferBuilder.CreateString("outputLayerNormWeights")));
        operatorInputs.push_back(buffers.size() - 1);
    }
    else
    {
        operatorInputs.push_back(kTfLiteOptionalTensor);
    }

    int outputBufferId = buffers.size();
    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
    tensors.push_back(CreateTensor(
        flatBufferBuilder,
        flatBufferBuilder.CreateVector<int32_t>(outputShape.data(), outputShape.size()),
        ::tflite::TensorType_FLOAT32,
        outputBufferId,
        flatBufferBuilder.CreateString("output")));
    std::vector<int32_t> operatorOutputs;
    operatorOutputs.push_back(buffers.size() - 1);

    // create operator
    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_UnidirectionalSequenceLSTMOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions =
        CreateUnidirectionalSequenceLSTMOptions(flatBufferBuilder,
                                                activationFunction,
                                                clippingThresCell,
                                                clippingThresProj,
                                                isTimeMajor).Union();

    flatbuffers::Offset<Operator> lstmOperator =
        CreateOperator(flatBufferBuilder,
                       0,
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       operatorBuiltinOptionsType,
                       operatorBuiltinOptions);

    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                       flatBufferBuilder.CreateVector(&lstmOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
        flatBufferBuilder.CreateString("ArmnnDelegate: UnidirectionalSequenceLSTM Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode =
        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM);

    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}
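// Builds the UNIDIRECTIONAL_SEQUENCE_LSTM model above, runs it both on the reference
// TfLite interpreter and on an interpreter using the ArmNN delegate, then compares
// both outputs against the expected values (and against each other).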
template <typename T>
void UnidirectionalSequenceLstmTestImpl(
    std::vector<armnn::BackendId>& backends,
    tflite::TensorType tensorType,
    int32_t batchSize,
    int32_t timeSize,
    int32_t inputSize,
    int32_t outputSize,
    int32_t numUnits,
    bool hasInputToInputWeights,
    const std::vector<T>& inputToInputWeights,
    const std::vector<T>& inputToForgetWeights,
    const std::vector<T>& inputToCellWeights,
    const std::vector<T>& inputToOutputWeights,
    bool hasRecurrentToInputWeights,
    const std::vector<T>& recurrentToInputWeights,
    const std::vector<T>& recurrentToForgetWeights,
    const std::vector<T>& recurrentToCellWeights,
    const std::vector<T>& recurrentToOutputWeights,
    bool hasCellToInputWeights,
    const std::vector<T>& cellToInputWeights,
    bool hasCellToForgetWeights,
    const std::vector<T>& cellToForgetWeights,
    bool hasCellToOutputWeights,
    const std::vector<T>& cellToOutputWeights,
    bool hasInputGateBias,
    const std::vector<float>& inputGateBias,
    const std::vector<float>& forgetGateBias,
    const std::vector<float>& cellBias,
    const std::vector<float>& outputGateBias,
    bool hasProjectionWeights,
    const std::vector<T>& projectionWeights,
    bool hasProjectionBias,
    const std::vector<float>& projectionBias,
    bool hasInputLayerNormWeights,
    const std::vector<float>& inputLayerNormWeights,
    bool hasForgetLayerNormWeights,
    const std::vector<float>& forgetLayerNormWeights,
    bool hasCellLayerNormWeights,
    const std::vector<float>& cellLayerNormWeights,
    bool hasOutputLayerNormWeights,
    const std::vector<float>& outputLayerNormWeights,
    std::vector<float>& inputValues,
    std::vector<float>& expectedOutputValues,
    tflite::ActivationFunctionType activationFunction,
    float clippingThresCell,
    float clippingThresProj,
    bool isTimeMajor,
    float quantScale = 0.1f)
{
    using namespace tflite;

    std::vector<char> modelBuffer =
        CreateUnidirectionalSequenceLstmTfLiteModel<T>(tensorType,
                                                       batchSize, timeSize, inputSize, outputSize, numUnits,
                                                       hasInputToInputWeights, inputToInputWeights,
                                                       inputToForgetWeights, inputToCellWeights,
                                                       inputToOutputWeights,
                                                       hasRecurrentToInputWeights, recurrentToInputWeights,
                                                       recurrentToForgetWeights, recurrentToCellWeights,
                                                       recurrentToOutputWeights,
                                                       hasCellToInputWeights, cellToInputWeights,
                                                       hasCellToForgetWeights, cellToForgetWeights,
                                                       hasCellToOutputWeights, cellToOutputWeights,
                                                       hasInputGateBias, inputGateBias,
                                                       forgetGateBias, cellBias, outputGateBias,
                                                       hasProjectionWeights, projectionWeights,
                                                       hasProjectionBias, projectionBias,
                                                       hasInputLayerNormWeights, inputLayerNormWeights,
                                                       hasForgetLayerNormWeights, forgetLayerNormWeights,
                                                       hasCellLayerNormWeights, cellLayerNormWeights,
                                                       hasOutputLayerNormWeights, outputLayerNormWeights,
                                                       activationFunction,
                                                       clippingThresCell, clippingThresProj,
                                                       isTimeMajor, quantScale);

    const Model* tfLiteModel = GetModel(modelBuffer.data());

    // Create TfLite Interpreters
    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&armnnDelegateInterpreter) == kTfLiteOk);
    CHECK(armnnDelegateInterpreter != nullptr);
    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);

    std::unique_ptr<Interpreter> tfLiteInterpreter;
    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
              (&tfLiteInterpreter) == kTfLiteOk);
    CHECK(tfLiteInterpreter != nullptr);
    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);

    // Create the ArmNN Delegate
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    CHECK(theArmnnDelegate != nullptr);

    // Modify armnnDelegateInterpreter to use armnnDelegate
    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);

    // Set input data
    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        tfLiteDelegateInputData[i] = inputValues[i];
    }

    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        armnnDelegateInputData[i] = inputValues[i];
    }
    // Run EnqueueWorkload
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    // Compare output data
    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);

    if (tensorType == ::tflite::TensorType_INT8)
    {
        // Allow 2% tolerance for quantized weights
        armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData,
                                   expectedOutputValues.size(), 2);
        armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelegateOutputData,
                                   expectedOutputValues.size(), 2);
        armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData,
                                   expectedOutputValues.size(), 2);
    }
    else
    {
        armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData,
                                   expectedOutputValues.size());
        armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelegateOutputData,
                                   expectedOutputValues.size());
        armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData,
                                   expectedOutputValues.size());
    }
}

} // anonymous namespace
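// Illustrative usage -- a hypothetical sketch only: the backend, shapes and fill
// values below are invented for illustration and are not taken from a real test case.
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     int32_t batchSize = 1, timeSize = 2, inputSize = 2, outputSize = 4, numUnits = 4;
//     // Non-CIFG LSTM, so the input gate tensors are present:
//     std::vector<float> inputToInputWeights(numUnits * inputSize, 0.1f);
//     // ... remaining weight/bias vectors sized analogously ...
//     UnidirectionalSequenceLstmTestImpl<float>(backends, ::tflite::TensorType_FLOAT32,
//                                               batchSize, timeSize, inputSize, outputSize,
//                                               numUnits, true, inputToInputWeights,
//                                               /* ... remaining tensors and flags ... */
//                                               inputValues, expectedOutputValues,
//                                               ::tflite::ActivationFunctionType_TANH,
//                                               10.0f, 0.0f, /* isTimeMajor */ false);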