From 615e06f54a4c4139e81e289991ba4084aa2f69d3 Mon Sep 17 00:00:00 2001
From: Finn Williams
Date: Mon, 20 Jun 2022 13:48:20 +0100
Subject: IVGCVSW-6650 Refactor ExecuteNetwork

 * Remove InferenceModel
 * Add automatic IO type, shape and name configuration
 * Deprecate various redundant options
 * Add internal output comparison

Signed-off-by: Finn Williams
Change-Id: I2eca248bc91e1655a99ed94990efb8059f541fa9
---
 tests/ExecuteNetwork/TfliteExecutor.cpp | 251 ++++++++++++++++++++++++++++++++
 1 file changed, 251 insertions(+)
 create mode 100644 tests/ExecuteNetwork/TfliteExecutor.cpp

diff --git a/tests/ExecuteNetwork/TfliteExecutor.cpp b/tests/ExecuteNetwork/TfliteExecutor.cpp
new file mode 100644
index 0000000000..f7a3068d7b
--- /dev/null
+++ b/tests/ExecuteNetwork/TfliteExecutor.cpp
@@ -0,0 +1,251 @@
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "TfliteExecutor.hpp"

TfLiteExecutor::TfLiteExecutor(const ExecuteNetworkParams& params) : m_Params(params)
{
    std::unique_ptr<tflite::FlatBufferModel> model =
        tflite::FlatBufferModel::BuildFromFile(m_Params.m_ModelPath.c_str());

    m_TfLiteInterpreter = std::make_unique<tflite::Interpreter>();
    tflite::ops::builtin::BuiltinOpResolver resolver;

    tflite::InterpreterBuilder builder(*model, resolver);
    builder(&m_TfLiteInterpreter);
    m_TfLiteInterpreter->AllocateTensors();

    int status;
    if (m_Params.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate)
    {
        // Create the Arm NN delegate.
        // Populate a DelegateOptions from the ExecuteNetworkParams.
        armnnDelegate::DelegateOptions delegateOptions = m_Params.ToDelegateOptions();
        delegateOptions.SetExternalProfilingParams(delegateOptions.GetExternalProfilingParams());

        std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
                theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                                 armnnDelegate::TfLiteArmnnDelegateDelete);
        // Register the armnn_delegate with the TfLiteInterpreter.
        status = m_TfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
        if (status == kTfLiteError)
        {
            LogAndThrow("Could not register ArmNN TfLite Delegate to TfLiteInterpreter");
        }
    }
    else
    {
        std::cout << "Running on TfLite without ArmNN delegate\n";
    }
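
    // Populate every input tensor. The element count comes from the tensor's dims, and
    // PopulateTensorWithData() fills each buffer either with generated values (when
    // m_GenerateTensorData is set) or with data read from the optional input file chosen below.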
    armnn::Optional<std::string> dataFile = m_Params.m_GenerateTensorData
                                            ? armnn::EmptyOptional()
                                            : armnn::MakeOptional<std::string>(m_Params.m_InputTensorDataFilePaths[0]);

    const size_t numInputs = m_Params.m_InputNames.size();

    for (unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
    {
        int input = m_TfLiteInterpreter->inputs()[inputIndex];

        TfLiteIntArray* inputDims = m_TfLiteInterpreter->tensor(input)->dims;

        unsigned int inputSize = 1;
        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
        {
            inputSize *= inputDims->data[dim];
        }

        const auto& inputName = m_TfLiteInterpreter->input_tensor(input)->name;
        const auto& dataType  = m_TfLiteInterpreter->input_tensor(input)->type;

        switch (dataType)
        {
            case kTfLiteFloat32:
            {
                auto inputData = m_TfLiteInterpreter->typed_tensor<float>(input);
                PopulateTensorWithData(inputData, inputSize, dataFile, inputName);
                break;
            }
            case kTfLiteInt32:
            {
                auto inputData = m_TfLiteInterpreter->typed_tensor<int32_t>(input);
                PopulateTensorWithData(inputData, inputSize, dataFile, inputName);
                break;
            }
            case kTfLiteUInt8:
            {
                auto inputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(input);
                PopulateTensorWithData(inputData, inputSize, dataFile, inputName);
                break;
            }
            case kTfLiteInt16:
            {
                auto inputData = m_TfLiteInterpreter->typed_tensor<int16_t>(input);
                PopulateTensorWithData(inputData, inputSize, dataFile, inputName);
                break;
            }
            case kTfLiteInt8:
            {
                auto inputData = m_TfLiteInterpreter->typed_tensor<int8_t>(input);
                PopulateTensorWithData(inputData, inputSize, dataFile, inputName);
                break;
            }
            default:
            {
                LogAndThrow("Unsupported input tensor data type");
            }
        }
    }
}
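
// Execute() runs up to m_Params.m_Iterations timed inferences. Unless output printing is
// disabled or buffers are reused, every output tensor is written to stdout or to the requested
// output file, and the raw output allocations are collected so that a second run can be
// compared against them with CompareAndPrintResult().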
std::vector<const void*> TfLiteExecutor::Execute()
{
    int status = 0;
    std::vector<const void*> results;
    for (size_t x = 0; x < m_Params.m_Iterations; x++)
    {
        // Start timer to record inference time in milliseconds.
        const auto start_time = armnn::GetTimeNow();
        // Run the inference
        status = m_TfLiteInterpreter->Invoke();
        const auto duration = armnn::GetTimeDuration(start_time);

        if (m_Params.m_DontPrintOutputs || m_Params.m_ReuseBuffers)
        {
            break;
        }
        // Print out the output
        for (unsigned int outputIndex = 0; outputIndex < m_TfLiteInterpreter->outputs().size(); ++outputIndex)
        {
            auto tfLiteDelegateOutputId = m_TfLiteInterpreter->outputs()[outputIndex];
            TfLiteIntArray* outputDims = m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
            // If we've been asked to write to a file then set a file output stream. Otherwise use stdout.
            FILE* outputTensorFile = stdout;
            if (!m_Params.m_OutputTensorFiles.empty())
            {
                outputTensorFile = fopen(m_Params.m_OutputTensorFiles[outputIndex].c_str(), "w");
                if (outputTensorFile == NULL)
                {
                    LogAndThrow("Specified output tensor file, \"" + m_Params.m_OutputTensorFiles[outputIndex] +
                                "\", cannot be created. Defaulting to stdout. Error was: " + std::strerror(errno));
                }
                else
                {
                    ARMNN_LOG(info) << "Writing output '" << outputIndex << "' of iteration: " << x + 1
                                    << " to file: '" << m_Params.m_OutputTensorFiles[outputIndex] << "'";
                }
            }
            long outputSize = 1;
            for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
            {
                outputSize *= outputDims->data[dim];
            }

            std::cout << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name << ": ";
            results.push_back(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation);

            switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)
            {
                case kTfLiteFloat32:
                {
                    auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
                    for (int i = 0; i < outputSize; ++i)
                    {
                        fprintf(outputTensorFile, "%f ", tfLiteDelegateOutputData[i]);
                    }
                    break;
                }
                case kTfLiteInt32:
                {
                    auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
                    for (int i = 0; i < outputSize; ++i)
                    {
                        fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
                    }
                    break;
                }
                case kTfLiteUInt8:
                {
                    auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
                    for (int i = 0; i < outputSize; ++i)
                    {
                        fprintf(outputTensorFile, "%u ", tfLiteDelegateOutputData[i]);
                    }
                    break;
                }
                case kTfLiteInt8:
                {
                    auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
                    for (int i = 0; i < outputSize; ++i)
                    {
                        fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
                    }
                    break;
                }
                default:
                {
                    LogAndThrow("Unsupported output type");
                }
            }

            std::cout << std::endl;
        }
        CheckInferenceTimeThreshold(duration, m_Params.m_ThresholdTime);
    }

    std::cout << status;
    return results;
}

void TfLiteExecutor::CompareAndPrintResult(std::vector<const void*> otherOutput)
{
    for (unsigned int outputIndex = 0; outputIndex < m_TfLiteInterpreter->outputs().size(); ++outputIndex)
    {
        auto tfLiteDelegateOutputId = m_TfLiteInterpreter->outputs()[outputIndex];
        float result = 0;
        switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)
        {
            case kTfLiteFloat32:
            {
                result = ComputeRMSE<float>(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
                                            otherOutput[outputIndex],
                                            m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes);
                break;
            }
            case kTfLiteInt32:
            {
                result = ComputeRMSE<int32_t>(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
                                              otherOutput[outputIndex],
                                              m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes);
                break;
            }
            case kTfLiteUInt8:
            {
                result = ComputeRMSE<uint8_t>(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
                                              otherOutput[outputIndex],
                                              m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes);
                break;
            }
            case kTfLiteInt8:
            {
                result = ComputeRMSE<int8_t>(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
                                             otherOutput[outputIndex],
                                             m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes);
                break;
            }
            default:
            {
            }
        }

        std::cout << "RMSE of "
                  << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name
                  << ": " << result << std::endl;
    }
};
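
Usage illustration (not part of this patch): a minimal sketch of how the new executor could be
driven, inferred only from the constructor, Execute() and CompareAndPrintResult() signatures in
the diff above. The two header names and the RunTfLiteOnce() wrapper are assumptions; every
m_* field referenced below does appear in the diff.

    #include <vector>

    #include "ExecuteNetworkParams.hpp"   // assumed header for ExecuteNetworkParams
    #include "TfliteExecutor.hpp"

    int RunTfLiteOnce()
    {
        // Other fields keep their defaults; ExecuteNetwork normally fills them from the command line.
        ExecuteNetworkParams params;
        params.m_ModelPath          = "model.tflite";  // network to load
        params.m_Iterations         = 1;               // single timed inference
        params.m_GenerateTensorData = true;            // synthesise inputs instead of reading files
        params.m_TfLiteExecutor     = ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate;

        TfLiteExecutor executor(params);                        // builds the interpreter, fills inputs
        std::vector<const void*> outputs = executor.Execute();  // runs the iterations, prints outputs

        // Trivially compare the run against itself, just to exercise the new comparison path.
        executor.CompareAndPrintResult(outputs);

        return outputs.empty() ? 1 : 0;
    }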