From 83b429107a4bb1fe84e756c29d8ad3771d4beeee Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Thu, 7 Jul 2022 14:24:59 +0100
Subject: Revert "Revert "IVGCVSW-6650 Refactor ExecuteNetwork""

This reverts commit 1a7f033768acb27da11503bd29abb468d2e77f9e.

List of fixes to be able to add this code again:
* "emplacing_back" the vector inputTensors into the vector m_InputTensorsVec outside the for loop
* GetIOInfo() uses IOptimizedNetwork instead of INetwork, where the inferred shapes are not saved
* Add missing data type Signed32 to SetupInputsAndOutputs()
* PrintOutputTensors() prints the actual output without dequantizing
* Add profilingDetailsMethod as input in networkProperties in ArmNNExecutor constructor
* Fix typos

Change-Id: I91de166f87228282db3efa27431fe91458834442
Signed-off-by: Teresa Charlin
Change-Id: Ic6634d48892d11e5f146cdf285e1e333e93e9937
Signed-off-by: Francis Murtagh
---
 .../NetworkExecutionUtils.cpp | 309 ++-------------------
 1 file changed, 30 insertions(+), 279 deletions(-)

(limited to 'tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp')

diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
index 6c74aaa6ed..e3c95d9312 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
@@ -1,110 +1,12 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "NetworkExecutionUtils.hpp"
 
 #include <armnnUtils/Filesystem.hpp>
-#include <InferenceTest.hpp>
-#include <ResolveType.hpp>
-
-#if defined(ARMNN_SERIALIZER)
-#include "armnnDeserializer/IDeserializer.hpp"
-#endif
-#if defined(ARMNN_TF_LITE_PARSER)
-#include "armnnTfLiteParser/ITfLiteParser.hpp"
-#endif
-#if defined(ARMNN_ONNX_PARSER)
-#include "armnnOnnxParser/IOnnxParser.hpp"
-#endif
-
-template<armnn::DataType NonQuantizedType>
-auto ParseDataArray(std::istream& stream);
-
-template<armnn::DataType QuantizedType>
-auto ParseDataArray(std::istream& stream,
-                    const float& quantizationScale,
-                    const int32_t& quantizationOffset);
-
-template<>
-auto ParseDataArray<armnn::DataType::Float32>(std::istream& stream)
-{
-    return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); });
-}
-
-template<>
-auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream)
-{
-    return ParseArrayImpl<int>(stream, [](const std::string& s) { return std::stoi(s); });
-}
-
-template<>
-auto ParseDataArray<armnn::DataType::QSymmS8>(std::istream& stream)
-{
-    return ParseArrayImpl<int8_t>(stream,
-                                  [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
-}
-
-template<>
-auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream)
-{
-    return ParseArrayImpl<int8_t>(stream,
-                                  [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
-}
-
-
-template<>
-auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
-{
-    return ParseArrayImpl<uint8_t>(stream,
-                                   [](const std::string& s) { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });
-}
-
-template<>
-auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream,
-                                               const float& quantizationScale,
-                                               const int32_t& quantizationOffset)
-{
-    return ParseArrayImpl<int8_t>(stream,
-                                  [&quantizationScale, &quantizationOffset](const std::string& s)
-                                  {
-                                      return armnn::numeric_cast<int8_t>(
-                                          armnn::Quantize<int8_t>(std::stof(s),
-                                                                  quantizationScale,
-                                                                  quantizationOffset));
-                                  });
-}
-
-template<>
-auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
-                                               const float& quantizationScale,
-                                               const int32_t& quantizationOffset)
-{
-    return ParseArrayImpl<uint8_t>(stream,
-                                   [&quantizationScale, &quantizationOffset](const std::string& s)
-                                   {
-                                       return armnn::numeric_cast<uint8_t>(
-                                           armnn::Quantize<uint8_t>(std::stof(s),
-                                                                    quantizationScale,
-                                                                    quantizationOffset));
-                                   });
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-std::vector<T> GenerateDummyTensorData(unsigned int numElements)
-{
-    return std::vector<T>(numElements, static_cast<T>(0));
-}
-
-
-std::vector<unsigned int> ParseArray(std::istream& stream)
-{
-    return ParseArrayImpl<unsigned int>(
-        stream,
-        [](const std::string& s) { return armnn::numeric_cast<unsigned int>(std::stoi(s)); });
-}
-
+#include <iomanip>
 
 std::vector<std::string> ParseStringList(const std::string& inputString, const char* delimiter)
 {
     std::stringstream stream(inputString);
@@ -112,189 +14,27 @@ std::vector<std::string> ParseStringList(const std::string& inputString, const c
         return armnn::stringUtils::StringTrimCopy(s); }, delimiter);
 }
-
-TensorPrinter::TensorPrinter(const std::string& binding,
-                             const armnn::TensorInfo& info,
-                             const std::string& outputTensorFile,
-                             bool dequantizeOutput,
-                             const bool printToConsole)
-    : m_OutputBinding(binding)
-    , m_Scale(info.GetQuantizationScale())
-    , m_Offset(info.GetQuantizationOffset())
-    , m_OutputTensorFile(outputTensorFile)
-    , m_DequantizeOutput(dequantizeOutput)
-    , m_PrintToConsole(printToConsole) {}
-
-void TensorPrinter::operator()(const std::vector<float>& values)
-{
-    if (m_PrintToConsole)
-    {
-        std::cout << m_OutputBinding << ": ";
-        ForEachValue(values, [](float value)
-        {
-            printf("%f ", value);
-        });
-        printf("\n");
-    }
-    WriteToFile(values);
-}
-
-void TensorPrinter::operator()(const std::vector<uint8_t>& values)
+bool CheckInferenceTimeThreshold(const std::chrono::duration<double, std::milli>& duration,
+                                 const double& thresholdTime)
 {
-    if(m_DequantizeOutput)
+    ARMNN_LOG(info) << "Inference time: " << std::setprecision(2)
+                    << std::fixed << duration.count() << " ms\n";
+    // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
+    if (thresholdTime != 0.0)
     {
-        auto& scale = m_Scale;
-        auto& offset = m_Offset;
-        std::vector<float> dequantizedValues;
-        ForEachValue(values, [&scale, &offset, &dequantizedValues](uint8_t value)
-        {
-            auto dequantizedValue = armnn::Dequantize(value, scale, offset);
-            dequantizedValues.push_back(dequantizedValue);
-        });
-
-        if (m_PrintToConsole)
+        ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
+                        << std::fixed << thresholdTime << " ms";
+        auto thresholdMinusInference = thresholdTime - duration.count();
+        ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
+                        << std::fixed << thresholdMinusInference << " ms" << "\n";
+        if (thresholdMinusInference < 0)
         {
-            std::cout << m_OutputBinding << ": ";
-            ForEachValue(dequantizedValues, [](float value)
-            {
-                printf("%f ", value);
-            });
-            printf("\n");
+            std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
+            ARMNN_LOG(fatal) << errorMessage;
+            return false;
         }
-
-        WriteToFile(dequantizedValues);
-    }
-    else
-    {
-        const std::vector<int> intValues(values.begin(), values.end());
-        operator()(intValues);
     }
-}
-
-void TensorPrinter::operator()(const std::vector<int8_t>& values)
-{
-    if (m_PrintToConsole)
-    {
-        std::cout << m_OutputBinding << ": ";
-        ForEachValue(values, [](int8_t value)
-        {
-            printf("%d ", value);
-        });
-        printf("\n");
-    }
-    WriteToFile(values);
-}
-
-void TensorPrinter::operator()(const std::vector<int>& values)
-{
-    if (m_PrintToConsole)
-    {
-        std::cout << m_OutputBinding << ": ";
-        ForEachValue(values, [](int value)
-        {
-            printf("%d ", value);
-        });
-        printf("\n");
-    }
-    WriteToFile(values);
-}
-
-template<typename Container, typename Delegate>
-void TensorPrinter::ForEachValue(const Container& c, Delegate delegate)
-{
-    for (const auto& value : c)
-    {
-        delegate(value);
-    }
-}
-
-template<typename T>
-void TensorPrinter::WriteToFile(const std::vector<T>& values)
-{
-    if (!m_OutputTensorFile.empty())
-    {
-        std::ofstream outputTensorFile;
-        outputTensorFile.open(m_OutputTensorFile, std::ofstream::out | std::ofstream::trunc);
-        if (outputTensorFile.is_open())
-        {
-            outputTensorFile << m_OutputBinding << ": ";
-            std::copy(values.begin(), values.end(), std::ostream_iterator<T>(outputTensorFile, " "));
-        }
-        else
-        {
-            ARMNN_LOG(info) << "Output Tensor File: " << m_OutputTensorFile << " could not be opened!";
-        }
-        outputTensorFile.close();
-    }
-}
-
-void PopulateTensorWithData(armnnUtils::TContainer& tensorData,
-                            unsigned int numElements,
-                            const std::string& dataTypeStr,
-                            const armnn::Optional<QuantizationParams>& qParams,
-                            const armnn::Optional<std::string>& dataFile)
-{
-    const bool readFromFile = dataFile.has_value() && !dataFile.value().empty();
-    const bool quantizeData = qParams.has_value();
-
-    std::ifstream inputTensorFile;
-    if (readFromFile)
-    {
-        inputTensorFile = std::ifstream(dataFile.value());
-    }
-
-    if (dataTypeStr.compare("float") == 0)
-    {
-        if (quantizeData)
-        {
-            const float qScale = qParams.value().first;
-            const int qOffset = qParams.value().second;
-
-            tensorData = readFromFile ?
-                         ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile, qScale, qOffset) :
-                         GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
-        }
-        else
-        {
-            tensorData = readFromFile ?
-                         ParseDataArray<armnn::DataType::Float32>(inputTensorFile) :
-                         GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
-        }
-    }
-    else if (dataTypeStr.compare("int") == 0)
-    {
-        tensorData = readFromFile ?
-                     ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) :
-                     GenerateDummyTensorData<armnn::DataType::Signed32>(numElements);
-    }
-    else if (dataTypeStr.compare("qsymms8") == 0)
-    {
-        tensorData = readFromFile ?
-                     ParseDataArray<armnn::DataType::QSymmS8>(inputTensorFile) :
-                     GenerateDummyTensorData<armnn::DataType::QSymmS8>(numElements);
-    }
-    else if (dataTypeStr.compare("qasymm8") == 0 || dataTypeStr.compare("qasymmu8") == 0)
-    {
-        tensorData = readFromFile ?
-                     ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
-                     GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
-    }
-    else if (dataTypeStr.compare("qasymms8") == 0)
-    {
-        tensorData = readFromFile ?
-                     ParseDataArray<armnn::DataType::QAsymmS8>(inputTensorFile) :
-                     GenerateDummyTensorData<armnn::DataType::QAsymmS8>(numElements);
-    }
-    else
-    {
-        std::string errorMessage = "Unsupported tensor data type " + dataTypeStr;
-        ARMNN_LOG(fatal) << errorMessage;
-
-        inputTensorFile.close();
-        throw armnn::Exception(errorMessage);
-    }
-
-    inputTensorFile.close();
+    return true;
 }
 
 bool ValidatePath(const std::string& file, const bool expectFile)
@@ -312,6 +52,13 @@ bool ValidatePath(const std::string& file, const bool expectFile)
     return true;
 }
 
+std::vector<unsigned int> ParseArray(std::istream& stream)
+{
+    return ParseArrayImpl<unsigned int>(
+            stream,
+            [](const std::string& s) { return armnn::numeric_cast<unsigned int>(std::stoi(s)); });
+}
+
 bool ValidatePaths(const std::vector<std::string>& fileVec, const bool expectFile)
 {
     bool allPathsValid = true;
@@ -325,5 +72,9 @@ bool ValidatePaths(const std::vector<std::string>& fileVec, const bool expectFil
    }
     return allPathsValid;
 }
-
+void LogAndThrow(std::string eMsg)
+{
+    ARMNN_LOG(error) << eMsg;
+    throw armnn::Exception(eMsg);
+}
-- 
cgit v1.2.1
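
For readers skimming the patch, the helpers that survive in NetworkExecutionUtils.cpp compose naturally into a timed run. The sketch below is illustrative only: the helper signatures are taken verbatim from the diff above, while TimedRun() and RunSomeInference() are hypothetical stand-ins for the caller and the work being timed (in the real code that caller lives in the refactored executor, not in this file).

#include <chrono>
#include <string>
#include <vector>

// Signatures as declared by the patched NetworkExecutionUtils.cpp:
bool CheckInferenceTimeThreshold(const std::chrono::duration<double, std::milli>& duration,
                                 const double& thresholdTime);
void LogAndThrow(std::string eMsg);
bool ValidatePaths(const std::vector<std::string>& fileVec, const bool expectFile);

inline void RunSomeInference() {} // hypothetical stand-in for the timed work

void TimedRun(const std::vector<std::string>& modelFiles, double thresholdMs)
{
    // Fail fast if any input path is missing, before doing any timed work.
    if (!ValidatePaths(modelFiles, true))
    {
        LogAndThrow("One or more input files could not be found.");
    }

    const auto start = std::chrono::high_resolution_clock::now();
    RunSomeInference();
    const auto end = std::chrono::high_resolution_clock::now();

    // The subtraction yields a clock duration that converts implicitly to the
    // double-millisecond type the helper expects.
    const std::chrono::duration<double, std::milli> duration = end - start;

    // Logs the inference time and, when thresholdMs != 0.0 (i.e. a threshold
    // was supplied on the command line), fails if the run exceeded it.
    if (!CheckInferenceTimeThreshold(duration, thresholdMs))
    {
        LogAndThrow("Inference exceeded the requested time threshold.");
    }
}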
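
The first bullet in the commit message concerns where the completed inputTensors vector is handed to m_InputTensorsVec: once, after the loop that fills it, rather than on every pass. The fix itself lands in ArmNNExecutor.cpp, which is outside this diff, so the following is only a schematic illustration of the pattern with stand-in types; none of these names beyond inputTensors and m_InputTensorsVec are taken from the real code.

#include <cstddef>
#include <utility>
#include <vector>

struct Tensor {};                          // stand-in for armnn's binding/tensor pair
using InputTensors = std::vector<Tensor>;  // stand-in for armnn::InputTensors

struct ExecutorSketch
{
    std::vector<InputTensors> m_InputTensorsVec;

    void SetupInputs(std::size_t numInputs)
    {
        InputTensors inputTensors;
        for (std::size_t i = 0; i < numInputs; ++i)
        {
            inputTensors.push_back(Tensor{});  // one entry per input binding
        }
        // The fix the commit message describes: emplace_back the completed
        // vector outside the for loop; emplacing inside the loop would store
        // partially filled copies of inputTensors.
        m_InputTensorsVec.emplace_back(std::move(inputTensors));
    }
};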