aboutsummaryrefslogtreecommitdiff
path: root/tests/NetworkExecutionUtils
diff options
context:
space:
mode:
authorNikhil Raj Arm <nikhil.raj@arm.com>2022-07-05 09:29:18 +0000
committerNikhil Raj <nikhil.raj@arm.com>2022-07-08 15:21:03 +0100
commitf4ccb1f6339a1e9ed573f188e7f14353167b5749 (patch)
treebb53a449cd42ed919022bd52b9e369a28d5a14d4 /tests/NetworkExecutionUtils
parentfd33a698ee3c588aa4064b70b7781ab25ff76f66 (diff)
downloadarmnn-f4ccb1f6339a1e9ed573f188e7f14353167b5749.tar.gz
Revert "IVGCVSW-6650 Refactor ExecuteNetwork"
This reverts commit 615e06f54a4c4139e81e289991ba4084aa2f69d3. Reason for revert: <Breaking nightlies and tests> Change-Id: I06a4a0119463188a653bb749033f78514645bd0c
Diffstat (limited to 'tests/NetworkExecutionUtils')
-rw-r--r--tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp309
-rw-r--r--tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp279
2 files changed, 332 insertions, 256 deletions
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
index 2d3567bd24..6c74aaa6ed 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
@@ -1,12 +1,110 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "NetworkExecutionUtils.hpp"
#include <armnnUtils/Filesystem.hpp>
-#include <iterator>
+#include <InferenceTest.hpp>
+#include <ResolveType.hpp>
+
+#if defined(ARMNN_SERIALIZER)
+#include "armnnDeserializer/IDeserializer.hpp"
+#endif
+#if defined(ARMNN_TF_LITE_PARSER)
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+#endif
+#if defined(ARMNN_ONNX_PARSER)
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#endif
+
+template<armnn::DataType NonQuantizedType>
+auto ParseDataArray(std::istream& stream);
+
+template<armnn::DataType QuantizedType>
+auto ParseDataArray(std::istream& stream,
+ const float& quantizationScale,
+ const int32_t& quantizationOffset);
+
+template<>
+auto ParseDataArray<armnn::DataType::Float32>(std::istream& stream)
+{
+ return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); });
+}
+
+template<>
+auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream)
+{
+ return ParseArrayImpl<int>(stream, [](const std::string& s) { return std::stoi(s); });
+}
+
+template<>
+auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream)
+{
+ return ParseArrayImpl<int8_t>(stream,
+ [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
+}
+
+template<>
+auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
+{
+ return ParseArrayImpl<uint8_t>(stream,
+ [](const std::string& s) { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });
+}
+
+
+template<>
+auto ParseDataArray<armnn::DataType::QSymmS8>(std::istream& stream)
+{
+ return ParseArrayImpl<int8_t>(stream,
+ [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
+}
+
+template<>
+auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream,
+ const float& quantizationScale,
+ const int32_t& quantizationOffset)
+{
+ return ParseArrayImpl<int8_t>(stream,
+ [&quantizationScale, &quantizationOffset](const std::string& s)
+ {
+ return armnn::numeric_cast<int8_t>(
+ armnn::Quantize<int8_t>(std::stof(s),
+ quantizationScale,
+ quantizationOffset));
+ });
+}
+
+template<>
+auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
+ const float& quantizationScale,
+ const int32_t& quantizationOffset)
+{
+ return ParseArrayImpl<uint8_t>(stream,
+ [&quantizationScale, &quantizationOffset](const std::string& s)
+ {
+ return armnn::numeric_cast<uint8_t>(
+ armnn::Quantize<uint8_t>(std::stof(s),
+ quantizationScale,
+ quantizationOffset));
+ });
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+std::vector<T> GenerateDummyTensorData(unsigned int numElements)
+{
+ return std::vector<T>(numElements, static_cast<T>(0));
+}
+
+
+std::vector<unsigned int> ParseArray(std::istream& stream)
+{
+ return ParseArrayImpl<unsigned int>(
+ stream,
+ [](const std::string& s) { return armnn::numeric_cast<unsigned int>(std::stoi(s)); });
+}
+
std::vector<std::string> ParseStringList(const std::string& inputString, const char* delimiter)
{
std::stringstream stream(inputString);
@@ -14,27 +112,189 @@ std::vector<std::string> ParseStringList(const std::string& inputString, const c
return armnn::stringUtils::StringTrimCopy(s); }, delimiter);
}
-bool CheckInferenceTimeThreshold(const std::chrono::duration<double, std::milli>& duration,
- const double& thresholdTime)
+
+TensorPrinter::TensorPrinter(const std::string& binding,
+ const armnn::TensorInfo& info,
+ const std::string& outputTensorFile,
+ bool dequantizeOutput,
+ const bool printToConsole)
+ : m_OutputBinding(binding)
+ , m_Scale(info.GetQuantizationScale())
+ , m_Offset(info.GetQuantizationOffset())
+ , m_OutputTensorFile(outputTensorFile)
+ , m_DequantizeOutput(dequantizeOutput)
+ , m_PrintToConsole(printToConsole) {}
+
+void TensorPrinter::operator()(const std::vector<float>& values)
+{
+ if (m_PrintToConsole)
+ {
+ std::cout << m_OutputBinding << ": ";
+ ForEachValue(values, [](float value)
+ {
+ printf("%f ", value);
+ });
+ printf("\n");
+ }
+ WriteToFile(values);
+}
+
+void TensorPrinter::operator()(const std::vector<uint8_t>& values)
{
- ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
- << std::fixed << duration.count() << " ms\n";
- // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
- if (thresholdTime != 0.0)
+ if(m_DequantizeOutput)
{
- ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
- << std::fixed << thresholdTime << " ms";
- auto thresholdMinusInference = thresholdTime - duration.count();
- ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
- << std::fixed << thresholdMinusInference << " ms" << "\n";
- if (thresholdMinusInference < 0)
+ auto& scale = m_Scale;
+ auto& offset = m_Offset;
+ std::vector<float> dequantizedValues;
+ ForEachValue(values, [&scale, &offset, &dequantizedValues](uint8_t value)
+ {
+ auto dequantizedValue = armnn::Dequantize(value, scale, offset);
+ dequantizedValues.push_back(dequantizedValue);
+ });
+
+ if (m_PrintToConsole)
{
- std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
- ARMNN_LOG(fatal) << errorMessage;
- return false;
+ std::cout << m_OutputBinding << ": ";
+ ForEachValue(dequantizedValues, [](float value)
+ {
+ printf("%f ", value);
+ });
+ printf("\n");
}
+
+ WriteToFile(dequantizedValues);
}
- return true;
+ else
+ {
+ const std::vector<int> intValues(values.begin(), values.end());
+ operator()(intValues);
+ }
+}
+
+void TensorPrinter::operator()(const std::vector<int8_t>& values)
+{
+ if (m_PrintToConsole)
+ {
+ std::cout << m_OutputBinding << ": ";
+ ForEachValue(values, [](int8_t value)
+ {
+ printf("%d ", value);
+ });
+ printf("\n");
+ }
+ WriteToFile(values);
+}
+
+void TensorPrinter::operator()(const std::vector<int>& values)
+{
+ if (m_PrintToConsole)
+ {
+ std::cout << m_OutputBinding << ": ";
+ ForEachValue(values, [](int value)
+ {
+ printf("%d ", value);
+ });
+ printf("\n");
+ }
+ WriteToFile(values);
+}
+
+template<typename Container, typename Delegate>
+void TensorPrinter::ForEachValue(const Container& c, Delegate delegate)
+{
+ for (const auto& value : c)
+ {
+ delegate(value);
+ }
+}
+
+template<typename T>
+void TensorPrinter::WriteToFile(const std::vector<T>& values)
+{
+ if (!m_OutputTensorFile.empty())
+ {
+ std::ofstream outputTensorFile;
+ outputTensorFile.open(m_OutputTensorFile, std::ofstream::out | std::ofstream::trunc);
+ if (outputTensorFile.is_open())
+ {
+ outputTensorFile << m_OutputBinding << ": ";
+ std::copy(values.begin(), values.end(), std::ostream_iterator<T>(outputTensorFile, " "));
+ }
+ else
+ {
+ ARMNN_LOG(info) << "Output Tensor File: " << m_OutputTensorFile << " could not be opened!";
+ }
+ outputTensorFile.close();
+ }
+}
+
+void PopulateTensorWithData(armnnUtils::TContainer& tensorData,
+ unsigned int numElements,
+ const std::string& dataTypeStr,
+ const armnn::Optional<QuantizationParams>& qParams,
+ const armnn::Optional<std::string>& dataFile)
+{
+ const bool readFromFile = dataFile.has_value() && !dataFile.value().empty();
+ const bool quantizeData = qParams.has_value();
+
+ std::ifstream inputTensorFile;
+ if (readFromFile)
+ {
+ inputTensorFile = std::ifstream(dataFile.value());
+ }
+
+ if (dataTypeStr.compare("float") == 0)
+ {
+ if (quantizeData)
+ {
+ const float qScale = qParams.value().first;
+ const int qOffset = qParams.value().second;
+
+ tensorData = readFromFile ?
+ ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile, qScale, qOffset) :
+ GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
+ }
+ else
+ {
+ tensorData = readFromFile ?
+ ParseDataArray<armnn::DataType::Float32>(inputTensorFile) :
+ GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
+ }
+ }
+ else if (dataTypeStr.compare("int") == 0)
+ {
+ tensorData = readFromFile ?
+ ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) :
+ GenerateDummyTensorData<armnn::DataType::Signed32>(numElements);
+ }
+ else if (dataTypeStr.compare("qsymms8") == 0)
+ {
+ tensorData = readFromFile ?
+ ParseDataArray<armnn::DataType::QSymmS8>(inputTensorFile) :
+ GenerateDummyTensorData<armnn::DataType::QSymmS8>(numElements);
+ }
+ else if (dataTypeStr.compare("qasymm8") == 0 || dataTypeStr.compare("qasymmu8") == 0)
+ {
+ tensorData = readFromFile ?
+ ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
+ GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
+ }
+ else if (dataTypeStr.compare("qasymms8") == 0)
+ {
+ tensorData = readFromFile ?
+ ParseDataArray<armnn::DataType::QAsymmS8>(inputTensorFile) :
+ GenerateDummyTensorData<armnn::DataType::QAsymmS8>(numElements);
+ }
+ else
+ {
+ std::string errorMessage = "Unsupported tensor data type " + dataTypeStr;
+ ARMNN_LOG(fatal) << errorMessage;
+
+ inputTensorFile.close();
+ throw armnn::Exception(errorMessage);
+ }
+
+ inputTensorFile.close();
}
bool ValidatePath(const std::string& file, const bool expectFile)
@@ -52,13 +312,6 @@ bool ValidatePath(const std::string& file, const bool expectFile)
return true;
}
-std::vector<unsigned int> ParseArray(std::istream& stream)
-{
- return ParseArrayImpl<unsigned int>(
- stream,
- [](const std::string& s) { return armnn::numeric_cast<unsigned int>(std::stoi(s)); });
-}
-
bool ValidatePaths(const std::vector<std::string>& fileVec, const bool expectFile)
{
bool allPathsValid = true;
@@ -72,9 +325,5 @@ bool ValidatePaths(const std::vector<std::string>& fileVec, const bool expectFil
return allPathsValid;
}
-void LogAndThrow(std::string eMsg)
-{
- ARMNN_LOG(error) << eMsg;
- throw armnn::Exception(eMsg);
-}
+
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 14d7fe5551..bc2868ab35 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -1,83 +1,63 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
+#include <armnn/IRuntime.hpp>
+#include <armnn/Types.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/StringUtils.hpp>
-#include <armnn/utility/NumericCast.hpp>
-#include <armnn/BackendRegistry.hpp>
+#include <armnnUtils/TContainer.hpp>
#include <iostream>
#include <fstream>
-#include <iomanip>
-#include <iterator>
-/**
- * Given a measured duration and a threshold time tell the user whether we succeeded or not.
- *
- * @param duration the measured inference duration.
- * @param thresholdTime the threshold time in milliseconds.
- * @return false if the measured time exceeded the threshold.
- */
-bool CheckInferenceTimeThreshold(const std::chrono::duration<double, std::milli>& duration,
- const double& thresholdTime);
-
-inline bool CheckRequestedBackendsAreValid(const std::vector<armnn::BackendId>& backendIds,
- armnn::Optional<std::string&> invalidBackendIds = armnn::EmptyOptional())
-{
- if (backendIds.empty())
- {
- return false;
- }
-
- armnn::BackendIdSet validBackendIds = armnn::BackendRegistryInstance().GetBackendIds();
-
- bool allValid = true;
- for (const auto& backendId : backendIds)
- {
- if (std::find(validBackendIds.begin(), validBackendIds.end(), backendId) == validBackendIds.end())
- {
- allValid = false;
- if (invalidBackendIds)
- {
- if (!invalidBackendIds.value().empty())
- {
- invalidBackendIds.value() += ", ";
- }
- invalidBackendIds.value() += backendId;
- }
- }
- }
- return allValid;
-}
std::vector<unsigned int> ParseArray(std::istream& stream);
/// Splits a given string at every accurance of delimiter into a vector of string
std::vector<std::string> ParseStringList(const std::string& inputString, const char* delimiter);
-/// Dequantize an array of a given type
-/// @param array Type erased array to dequantize
-/// @param numElements Elements in the array
-/// @param array Type erased array to dequantize
-template <typename T>
-std::vector<float> DequantizeArray(const void* array, unsigned int numElements, float scale, int32_t offset)
+struct TensorPrinter
{
- const T* quantizedArray = reinterpret_cast<const T*>(array);
- std::vector<float> dequantizedVector;
- dequantizedVector.reserve(numElements);
- for (unsigned int i = 0; i < numElements; ++i)
- {
- float f = armnn::Dequantize(*(quantizedArray + i), scale, offset);
- dequantizedVector.push_back(f);
- }
- return dequantizedVector;
-}
+ TensorPrinter(const std::string& binding,
+ const armnn::TensorInfo& info,
+ const std::string& outputTensorFile,
+ bool dequantizeOutput,
+ bool printToConsole = true);
+
+ void operator()(const std::vector<float>& values);
+
+ void operator()(const std::vector<uint8_t>& values);
+
+ void operator()(const std::vector<int>& values);
+
+ void operator()(const std::vector<int8_t>& values);
+
+private:
+ template<typename Container, typename Delegate>
+ void ForEachValue(const Container& c, Delegate delegate);
+
+ template<typename T>
+ void WriteToFile(const std::vector<T>& values);
+
+ std::string m_OutputBinding;
+ float m_Scale;
+ int m_Offset;
+ std::string m_OutputTensorFile;
+ bool m_DequantizeOutput;
+ bool m_PrintToConsole;
+};
+
+using QuantizationParams = std::pair<float, int32_t>;
-void LogAndThrow(std::string eMsg);
+void PopulateTensorWithData(armnnUtils::TContainer& tensorData,
+ unsigned int numElements,
+ const std::string& dataTypeStr,
+ const armnn::Optional<QuantizationParams>& qParams,
+ const armnn::Optional<std::string>& dataFile);
/**
* Verifies if the given string is a valid path. Reports invalid paths to std::err.
@@ -95,152 +75,6 @@ bool ValidatePath(const std::string& file, const bool expectFile);
* */
bool ValidatePaths(const std::vector<std::string>& fileVec, const bool expectFile);
-/// Returns a function of read the given type as a string
-template <typename Integer, typename std::enable_if_t<std::is_integral<Integer>::value>* = nullptr>
-std::function<Integer(const std::string&)> GetParseElementFunc()
-{
- return [](const std::string& s) { return armnn::numeric_cast<Integer>(std::stoi(s)); };
-}
-
-template <typename Float, std::enable_if_t<std::is_floating_point<Float>::value>* = nullptr>
-std::function<Float(const std::string&)> GetParseElementFunc()
-{
- return [](const std::string& s) { return std::stof(s); };
-}
-
-template <typename T>
-void PopulateTensorWithData(T* tensor,
- const unsigned int numElements,
- const armnn::Optional<std::string>& dataFile,
- const std::string& inputName)
-{
- const bool readFromFile = dataFile.has_value() && !dataFile.value().empty();
-
- std::ifstream inputTensorFile;
- if (!readFromFile)
- {
- std::fill(tensor, tensor + numElements, 0);
- return;
- }
- else
- {
- inputTensorFile = std::ifstream(dataFile.value());
- }
-
- auto parseElementFunc = GetParseElementFunc<T>();
- std::string line;
- unsigned int index = 0;
- while (std::getline(inputTensorFile, line))
- {
- std::vector<std::string> tokens = armnn::stringUtils::StringTokenizer(line, "\t ,:");
- for (const std::string& token : tokens)
- {
- if (!token.empty()) // See https://stackoverflow.com/questions/10437406/
- {
- try
- {
- if (index == numElements)
- {
- ARMNN_LOG(error) << "Number of elements: " << (index +1) << " in file \"" << dataFile.value()
- << "\" does not match number of elements: " << numElements
- << " for input \"" << inputName << "\".";
- }
- *(tensor + index) = parseElementFunc(token);
- index++;
- }
- catch (const std::exception&)
- {
- ARMNN_LOG(error) << "'" << token << "' is not a valid number. It has been ignored.";
- }
- }
- }
- }
-
- if (index != numElements)
- {
- ARMNN_LOG(error) << "Number of elements: " << (index +1) << " in file \"" << inputName
- << "\" does not match number of elements: " << numElements
- << " for input \"" << inputName << "\".";
- }
-}
-
-template<typename T>
-void WriteToFile(const std::string& outputTensorFileName,
- const std::string& outputName,
- const T* const array,
- const unsigned int numElements)
-{
- std::ofstream outputTensorFile;
- outputTensorFile.open(outputTensorFileName, std::ofstream::out | std::ofstream::trunc);
- if (outputTensorFile.is_open())
- {
- outputTensorFile << outputName << ": ";
- std::copy(array, array + numElements, std::ostream_iterator<T>(outputTensorFile, " "));
- }
- else
- {
- ARMNN_LOG(info) << "Output Tensor File: " << outputTensorFileName << " could not be opened!";
- }
- outputTensorFile.close();
-}
-
-struct OutputWriteInfo
-{
- const armnn::Optional<std::string>& m_OutputTensorFile;
- const std::string& m_OutputName;
- const armnn::Tensor& m_Tensor;
- const bool m_PrintTensor;
-};
-
-template <typename T>
-void PrintTensor(OutputWriteInfo& info, const char* formatString)
-{
- const T* array = reinterpret_cast<const T*>(info.m_Tensor.GetMemoryArea());
-
- if (info.m_OutputTensorFile.has_value())
- {
- WriteToFile(info.m_OutputTensorFile.value(),
- info.m_OutputName,
- array,
- info.m_Tensor.GetNumElements());
- }
-
- if (info.m_PrintTensor)
- {
- for (unsigned int i = 0; i < info.m_Tensor.GetNumElements(); i++)
- {
- printf(formatString, array[i]);
- }
- }
-}
-
-template <typename T>
-void PrintQuantizedTensor(OutputWriteInfo& info)
-{
- std::vector<float> dequantizedValues;
- auto tensor = info.m_Tensor;
- dequantizedValues = DequantizeArray<T>(tensor.GetMemoryArea(),
- tensor.GetNumElements(),
- tensor.GetInfo().GetQuantizationScale(),
- tensor.GetInfo().GetQuantizationOffset());
-
- if (info.m_OutputTensorFile.has_value())
- {
- WriteToFile(info.m_OutputTensorFile.value(),
- info.m_OutputName,
- dequantizedValues.data(),
- tensor.GetNumElements());
- }
-
- if (info.m_PrintTensor)
- {
- std::for_each(dequantizedValues.begin(), dequantizedValues.end(), [&](float value)
- {
- printf("%f ", value);
- });
- }
-}
-
template<typename T, typename TParseElementFunc>
std::vector<T> ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc, const char* chars = "\t ,:")
{
@@ -269,28 +103,21 @@ std::vector<T> ParseArrayImpl(std::istream& stream, TParseElementFunc parseEleme
return result;
}
-/// Compute the root-mean-square error (RMSE)
-/// @param expected
-/// @param actual
-/// @param size size of the tensor
-/// @return float the RMSE
-template<typename T>
-float ComputeRMSE(const void* expected, const void* actual, const size_t size)
+template <typename T, typename TParseElementFunc>
+void PopulateTensorWithDataGeneric(std::vector<T>& tensorData,
+ unsigned int numElements,
+ const armnn::Optional<std::string>& dataFile,
+ TParseElementFunc parseFunction)
{
- auto typedExpected = reinterpret_cast<const T*>(expected);
- auto typedActual = reinterpret_cast<const T*>(actual);
-
- T errorSum = 0;
+ const bool readFromFile = dataFile.has_value() && !dataFile.value().empty();
- for (unsigned int i = 0; i < size; i++)
+ std::ifstream inputTensorFile;
+ if (readFromFile)
{
- if (std::abs(typedExpected[i] - typedActual[i]) != 0)
- {
- std::cout << "";
- }
- errorSum += std::pow(std::abs(typedExpected[i] - typedActual[i]), 2);
+ inputTensorFile = std::ifstream(dataFile.value());
}
- float rmse = std::sqrt(armnn::numeric_cast<float>(errorSum) / armnn::numeric_cast<float>(size / sizeof(T)));
- return rmse;
-} \ No newline at end of file
+ tensorData = readFromFile ?
+ ParseArrayImpl<T>(inputTensorFile, parseFunction) :
+ std::vector<T>(numElements, static_cast<T>(0));
+}