author     Ferran Balaguer <ferran.balaguer@arm.com>      2019-02-08 17:09:55 +0000
committer  Matteo Martincigh <matteo.martincigh@arm.com>  2019-02-11 08:48:53 +0000
commit     c602f29d57f34b6bf0805d379b2174667d8bf52f (patch)
tree       50cdc475ec8732575c0cf2c56d4ced770215c4a2
parent     9c5d33a26ebc4be391ae4da9de584be2e453c78f (diff)
IVGCVSW-2529 DeepSpeech v1 test
Change-Id: Ieb99ac1aa347cee4b28b831753855c4614220648
-rwxr-xr-x  src/armnnTfParser/TfParser.cpp                                4
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp                    276
-rw-r--r--  tests/InferenceModel.hpp                                    61
-rw-r--r--  tests/InferenceTest.hpp                                     28
-rw-r--r--  tests/InferenceTest.inl                                     48
-rw-r--r--  tests/MobileNetSsdInferenceTest.hpp                          8
-rw-r--r--  tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp   28
-rw-r--r--  tests/YoloInferenceTest.hpp                                  2
8 files changed, 300 insertions, 155 deletions
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 80962748ce..b5fe6be075 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -3160,12 +3160,12 @@ void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow:
type = attr.type();
}
- if (type != tensorflow::DT_FLOAT && nodeDef.op() != "Const")
+ if ((type != tensorflow::DT_FLOAT && type != tensorflow::DT_INT32) && nodeDef.op() != "Const")
{
throw ParseException(
boost::str(
boost::format(
- "Currently only FLOAT is supported for tensorflow nodes (apart from Const). "
+ "Currently only FLOAT and INT32 are supported for tensorflow nodes (apart from Const). "
"Got %1% for Node %2% %3%")
% tensorflow::DataType_Name(type)
% nodeDef.name()
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index afde9860e2..bbab70b39a 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -27,6 +27,7 @@
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/classification.hpp>
#include <boost/program_options.hpp>
+#include <boost/variant.hpp>
#include <iostream>
#include <fstream>
@@ -42,7 +43,7 @@ namespace
namespace po = boost::program_options;
template<typename T, typename TParseElementFunc>
-std::vector<T> ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc)
+std::vector<T> ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc, const char * chars = "\t ,:")
{
std::vector<T> result;
// Processes line-by-line.
@@ -53,7 +54,7 @@ std::vector<T> ParseArrayImpl(std::istream& stream, TParseElementFunc parseEleme
try
{
// Coverity fix: boost::split() may throw an exception of type boost::bad_function_call.
- boost::split(tokens, line, boost::algorithm::is_any_of("\t ,;:"), boost::token_compress_on);
+ boost::split(tokens, line, boost::algorithm::is_any_of(chars), boost::token_compress_on);
}
catch (const std::exception& e)
{
@@ -137,14 +138,17 @@ std::vector<unsigned int> ParseArray(std::istream& stream)
[](const std::string& s) { return boost::numeric_cast<unsigned int>(std::stoi(s)); });
}
-void PrintOutputData(const std::string& outputLayerName, const std::vector<float>& data)
+template<>
+std::vector<int> ParseArray(std::istream& stream)
{
- std::cout << outputLayerName << ": ";
- for (size_t i = 0; i < data.size(); i++)
- {
- printf("%f ", data[i]);
- }
- printf("\n");
+ return ParseArrayImpl<int>(stream, [](const std::string& s) { return std::stoi(s); });
+}
+
+std::vector<std::string> ParseInputString(const std::string& inputString, const char * chars)
+{
+ std::stringstream stream(inputString);
+
+ return ParseArrayImpl<std::string>(stream, [](const std::string& s) { return boost::trim_copy(s); }, chars);
}
void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
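The helpers above split a raw option string on a caller-supplied separator set and trim each token; this is how the comma- and semicolon-separated command-line values are broken up. A minimal standalone sketch of the same behaviour (illustration only, not part of the patch; it assumes nothing beyond Boost.StringAlgo):

    // Sketch of the tokenisation done by ParseArrayImpl/ParseInputString.
    #include <boost/algorithm/string/classification.hpp>
    #include <boost/algorithm/string/split.hpp>
    #include <boost/algorithm/string/trim.hpp>
    #include <iostream>
    #include <string>
    #include <vector>

    std::vector<std::string> SplitAndTrim(const std::string& input, const char* chars)
    {
        std::vector<std::string> tokens;
        // token_compress_on collapses runs of separators, as in ParseArrayImpl.
        boost::split(tokens, input, boost::algorithm::is_any_of(chars), boost::token_compress_on);
        for (std::string& token : tokens)
        {
            boost::trim(token);
        }
        return tokens;
    }

    int main()
    {
        // Prints "input_node" then "seq_lengths" (hypothetical layer names).
        for (const std::string& name : SplitAndTrim("input_node, seq_lengths", ","))
        {
            std::cout << name << std::endl;
        }
    }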
@@ -172,26 +176,38 @@ template<typename TParser, typename TDataType>
int MainImpl(const char* modelPath,
bool isModelBinary,
const std::vector<armnn::BackendId>& computeDevices,
- const char* inputName,
- const armnn::TensorShape* inputTensorShape,
- const char* inputTensorDataFilePath,
- const char* outputName,
+ const std::vector<string>& inputNames,
+ const std::vector<std::unique_ptr<armnn::TensorShape>>& inputTensorShapes,
+ const std::vector<string>& inputTensorDataFilePaths,
+ const std::vector<string>& inputTypes,
+ const std::vector<string>& outputNames,
bool enableProfiling,
const size_t subgraphId,
const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
- using TContainer = std::vector<TDataType>;
+ using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
- // Loads input tensor.
- TContainer inputDataContainer;
+ std::vector<TContainer> inputDataContainers;
+
+ for(unsigned int i = 0; i < inputTensorDataFilePaths.size(); ++i)
{
- std::ifstream inputTensorFile(inputTensorDataFilePath);
- if (!inputTensorFile.good())
+ std::ifstream inputTensorFile(inputTensorDataFilePaths[i]);
+
+ if (inputTypes[i].compare("float") == 0)
+ {
+ inputDataContainers.push_back(ParseArray<float>(inputTensorFile));
+ }
+ else if (inputTypes[i].compare("int") == 0)
+ {
+ inputDataContainers.push_back(ParseArray<int>(inputTensorFile));
+ }
+ else
{
- BOOST_LOG_TRIVIAL(fatal) << "Failed to load input tensor data file from " << inputTensorDataFilePath;
+ BOOST_LOG_TRIVIAL(fatal) << "Unsupported tensor data type \"" << inputTypes[i] << "\". ";
return EXIT_FAILURE;
}
- inputDataContainer = ParseArray<TDataType>(inputTensorFile);
+
+ inputTensorFile.close();
}
try
@@ -201,29 +217,49 @@ int MainImpl(const char* modelPath,
params.m_ModelPath = modelPath;
params.m_IsModelBinary = isModelBinary;
params.m_ComputeDevices = computeDevices;
- params.m_InputBindings = { inputName };
- params.m_InputShapes = { *inputTensorShape };
- params.m_OutputBindings = { outputName };
+
+ for(const std::string& inputName: inputNames)
+ {
+ params.m_InputBindings.push_back(inputName);
+ }
+
+ for(unsigned int i = 0; i < inputTensorShapes.size(); ++i)
+ {
+ params.m_InputShapes.push_back(*inputTensorShapes[i]);
+ }
+
+ for(const std::string& outputName: outputNames)
+ {
+ params.m_OutputBindings.push_back(outputName);
+ }
+
params.m_EnableProfiling = enableProfiling;
params.m_SubgraphId = subgraphId;
InferenceModel<TParser, TDataType> model(params, runtime);
- const size_t numOutputs = params.m_OutputBindings.size();
- const size_t containerSize = model.GetOutputSize();
-
- // Set up input data container
- std::vector<TContainer> inputData(1, std::move(inputDataContainer));
+ const size_t numOutputs = params.m_OutputBindings.size();
+ std::vector<TContainer> outputDataContainers;
- // Set up output data container
- std::vector<TContainer> outputData(numOutputs, TContainer(containerSize));
+ for (unsigned int i = 0; i < numOutputs; ++i)
+ {
+ outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
+ }
- // Execute model
- model.Run(inputData, outputData);
+ model.Run(inputDataContainers, outputDataContainers);
// Print output tensors
for (size_t i = 0; i < numOutputs; i++)
{
- PrintOutputData(params.m_OutputBindings[i], outputData[i]);
+ boost::apply_visitor([&](auto&& value)
+ {
+ std::cout << params.m_OutputBindings[i] << ": ";
+ for (size_t j = 0; j < value.size(); ++j)
+ {
+ printf("%f ", static_cast<float>(value[j]));
+ }
+ printf("\n");
+ },
+ outputDataContainers[i]);
}
}
catch (armnn::Exception const& e)
@@ -236,17 +272,26 @@ int MainImpl(const char* modelPath,
}
// This will run a test
-int RunTest(const std::string& modelFormat,
- const std::string& inputTensorShapeStr,
+int RunTest(const std::string& format,
+ const std::string& inputTensorShapesStr,
const vector<armnn::BackendId>& computeDevice,
- const std::string& modelPath,
- const std::string& inputName,
- const std::string& inputTensorDataFilePath,
- const std::string& outputName,
+ const std::string& path,
+ const std::string& inputNames,
+ const std::string& inputTensorDataFilePaths,
+ const std::string& inputTypes,
+ const std::string& outputNames,
bool enableProfiling,
const size_t subgraphId,
const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
+ std::string modelFormat = boost::trim_copy(format);
+ std::string modelPath = boost::trim_copy(path);
+ std::vector<std::string> inputNamesVector = ParseInputString(inputNames, ",");
+ std::vector<std::string> inputTensorShapesVector = ParseInputString(inputTensorShapesStr, ";");
+ std::vector<std::string> inputTensorDataFilePathsVector = ParseInputString(inputTensorDataFilePaths, ",");
+ std::vector<std::string> outputNamesVector = ParseInputString(outputNames, ",");
+ std::vector<std::string> inputTypesVector = ParseInputString(inputTypes, ",");
+
// Parse model binary flag from the model-format string we got from the command-line
bool isModelBinary;
if (modelFormat.find("bin") != std::string::npos)
@@ -263,22 +308,55 @@ int RunTest(const std::string& modelFormat,
return EXIT_FAILURE;
}
- // Parse input tensor shape from the string we got from the command-line.
- std::unique_ptr<armnn::TensorShape> inputTensorShape;
- if (!inputTensorShapeStr.empty())
+ if ((inputTensorShapesVector.size() != 0) && (inputTensorShapesVector.size() != inputNamesVector.size()))
{
- std::stringstream ss(inputTensorShapeStr);
- std::vector<unsigned int> dims = ParseArray<unsigned int>(ss);
+ BOOST_LOG_TRIVIAL(fatal) << "input-name and input-tensor-shape must have the same amount of elements.";
+ return EXIT_FAILURE;
+ }
- try
+ if ((inputTensorDataFilePathsVector.size() != 0) &&
+ (inputTensorDataFilePathsVector.size() != inputNamesVector.size()))
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "input-name and input-tensor-data must have the same amount of elements.";
+ return EXIT_FAILURE;
+ }
+
+ if (inputTypesVector.size() == 0)
+ {
+ // Default the type of all inputs to "float"
+ for(unsigned int i = 0; i < inputNamesVector.size(); ++i)
{
- // Coverity fix: An exception of type armnn::InvalidArgumentException is thrown and never caught.
- inputTensorShape = std::make_unique<armnn::TensorShape>(dims.size(), dims.data());
+ inputTypesVector.push_back("float");
}
- catch (const armnn::InvalidArgumentException& e)
+ }
+ else if (inputTypesVector.size() != inputNamesVector.size())
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "input-name and input-type must have the same number of elements.";
+ return EXIT_FAILURE;
+ }
+
+ // Parse input tensor shape from the string we got from the command-line.
+ std::vector<std::unique_ptr<armnn::TensorShape>> inputTensorShapes;
+
+ if (!inputTensorShapesVector.empty())
+ {
+ inputTensorShapes.reserve(inputTensorShapesVector.size());
+
+ for(const std::string& shape : inputTensorShapesVector)
{
- BOOST_LOG_TRIVIAL(fatal) << "Cannot create tensor shape: " << e.what();
- return EXIT_FAILURE;
+ std::stringstream ss(shape);
+ std::vector<unsigned int> dims = ParseArray<unsigned int>(ss);
+
+ try
+ {
+ // Coverity fix: An exception of type armnn::InvalidArgumentException is thrown and never caught.
+ inputTensorShapes.push_back(std::make_unique<armnn::TensorShape>(dims.size(), dims.data()));
+ }
+ catch (const armnn::InvalidArgumentException& e)
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "Cannot create tensor shape: " << e.what();
+ return EXIT_FAILURE;
+ }
}
}
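Each semicolon-separated shape token is itself a comma-separated list of dimensions, parsed and wrapped in an armnn::TensorShape. A minimal sketch of that conversion (illustration only; assumes armnn/Tensor.hpp and a well-formed dimension string):

    #include <armnn/Tensor.hpp>
    #include <sstream>
    #include <string>
    #include <vector>

    // e.g. MakeShape("1,16,19,26") yields a 4-D TensorShape.
    armnn::TensorShape MakeShape(const std::string& dims)
    {
        std::vector<unsigned int> parsed;
        std::stringstream ss(dims);
        std::string token;
        while (std::getline(ss, token, ','))
        {
            parsed.push_back(static_cast<unsigned int>(std::stoul(token)));
        }
        // Throws armnn::InvalidArgumentException for unsupported dimension counts.
        return armnn::TensorShape(static_cast<unsigned int>(parsed.size()), parsed.data());
    }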
@@ -287,9 +365,9 @@ int RunTest(const std::string& modelFormat,
{
#if defined(ARMNN_CAFFE_PARSER)
return MainImpl<armnnCaffeParser::ICaffeParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
- inputName.c_str(), inputTensorShape.get(),
- inputTensorDataFilePath.c_str(), outputName.c_str(),
- enableProfiling, subgraphId, runtime);
+ inputNamesVector, inputTensorShapes,
+ inputTensorDataFilePathsVector, inputTypesVector,
+ outputNamesVector, enableProfiling, subgraphId, runtime);
#else
BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support.";
return EXIT_FAILURE;
@@ -299,9 +377,9 @@ int RunTest(const std::string& modelFormat,
{
#if defined(ARMNN_ONNX_PARSER)
return MainImpl<armnnOnnxParser::IOnnxParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
- inputName.c_str(), inputTensorShape.get(),
- inputTensorDataFilePath.c_str(), outputName.c_str(),
- enableProfiling, subgraphId, runtime);
+ inputNamesVector, inputTensorShapes,
+ inputTensorDataFilePathsVector, inputTypesVector,
+ outputNamesVector, enableProfiling, subgraphId, runtime);
#else
BOOST_LOG_TRIVIAL(fatal) << "Not built with Onnx parser support.";
return EXIT_FAILURE;
@@ -311,9 +389,9 @@ int RunTest(const std::string& modelFormat,
{
#if defined(ARMNN_TF_PARSER)
return MainImpl<armnnTfParser::ITfParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
- inputName.c_str(), inputTensorShape.get(),
- inputTensorDataFilePath.c_str(), outputName.c_str(),
- enableProfiling, subgraphId, runtime);
+ inputNamesVector, inputTensorShapes,
+ inputTensorDataFilePathsVector, inputTypesVector,
+ outputNamesVector, enableProfiling, subgraphId, runtime);
#else
BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support.";
return EXIT_FAILURE;
@@ -329,9 +407,10 @@ int RunTest(const std::string& modelFormat,
return EXIT_FAILURE;
}
return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
- inputName.c_str(), inputTensorShape.get(),
- inputTensorDataFilePath.c_str(), outputName.c_str(),
- enableProfiling, subgraphId, runtime);
+ inputNamesVector, inputTensorShapes,
+ inputTensorDataFilePathsVector, inputTypesVector,
+ outputNamesVector, enableProfiling, subgraphId,
+ runtime);
#else
BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat <<
"'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
@@ -351,10 +430,11 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow,
{
std::string modelFormat;
std::string modelPath;
- std::string inputName;
- std::string inputTensorShapeStr;
- std::string inputTensorDataFilePath;
- std::string outputName;
+ std::string inputNames;
+ std::string inputTensorShapes;
+ std::string inputTensorDataFilePaths;
+ std::string outputNames;
+ std::string inputTypes;
size_t subgraphId = 0;
@@ -372,15 +452,21 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow,
" .onnx")
("compute,c", po::value<std::vector<armnn::BackendId>>()->multitoken(),
backendsMessage.c_str())
- ("input-name,i", po::value(&inputName), "Identifier of the input tensor in the network.")
+ ("input-name,i", po::value(&inputNames), "Identifier of the input tensors in the network separated by comma.")
("subgraph-number,n", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be "
- "executed. Defaults to 0")
- ("input-tensor-shape,s", po::value(&inputTensorShapeStr),
- "The shape of the input tensor in the network as a flat array of integers separated by whitespace. "
+ "executed. Defaults to 0.")
+ ("input-tensor-shape,s", po::value(&inputTensorShapes),
+ "The shape of the input tensors in the network as a flat array of integers separated by comma. "
+ "Several shapes can be passed separating them by semicolon. "
"This parameter is optional, depending on the network.")
- ("input-tensor-data,d", po::value(&inputTensorDataFilePath),
- "Path to a file containing the input data as a flat array separated by whitespace.")
- ("output-name,o", po::value(&outputName), "Identifier of the output tensor in the network.");
+ ("input-tensor-data,d", po::value(&inputTensorDataFilePaths),
+ "Path to files containing the input data as a flat array separated by whitespace. "
+ "Several paths can be passed separating them by comma.")
+ ("input-type,y",po::value(&inputTypes), "The type of the input tensors in the network separated by comma. "
+ "If unset, defaults to \"float\" for all defined inputs. "
+ "Accepted values (float or int).")
+ ("output-name,o", po::value(&outputNames),
+ "Identifier of the output tensors in the network separated by comma.");
}
catch (const std::exception& e)
{
@@ -415,14 +501,6 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow,
return EXIT_FAILURE;
}
- // Remove leading and trailing whitespaces from the parsed arguments.
- boost::trim(modelFormat);
- boost::trim(modelPath);
- boost::trim(inputName);
- boost::trim(inputTensorShapeStr);
- boost::trim(inputTensorDataFilePath);
- boost::trim(outputName);
-
// Get the preferred order of compute devices.
std::vector<armnn::BackendId> computeDevices = vm["compute"].as<std::vector<armnn::BackendId>>();
@@ -438,8 +516,8 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow,
return EXIT_FAILURE;
}
- return RunTest(modelFormat, inputTensorShapeStr, computeDevices,
- modelPath, inputName, inputTensorDataFilePath, outputName, enableProfiling, subgraphId, runtime);
+ return RunTest(modelFormat, inputTensorShapes, computeDevices, modelPath, inputNames, inputTensorDataFilePaths,
+ inputTypes, outputNames, enableProfiling, subgraphId, runtime);
}
int main(int argc, const char* argv[])
@@ -457,10 +535,11 @@ int main(int argc, const char* argv[])
std::string modelFormat;
std::string modelPath;
- std::string inputName;
- std::string inputTensorShapeStr;
- std::string inputTensorDataFilePath;
- std::string outputName;
+ std::string inputNames;
+ std::string inputTensorShapes;
+ std::string inputTensorDataFilePaths;
+ std::string outputNames;
+ std::string inputTypes;
size_t subgraphId = 0;
@@ -483,15 +562,22 @@ int main(int argc, const char* argv[])
" .tflite, .onnx")
("compute,c", po::value<std::vector<std::string>>()->multitoken(),
backendsMessage.c_str())
- ("input-name,i", po::value(&inputName), "Identifier of the input tensor in the network.")
+ ("input-name,i", po::value(&inputNames),
+ "Identifier of the input tensors in the network separated by comma.")
("subgraph-number,x", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be executed."
"Defaults to 0")
- ("input-tensor-shape,s", po::value(&inputTensorShapeStr),
- "The shape of the input tensor in the network as a flat array of integers separated by whitespace. "
+ ("input-tensor-shape,s", po::value(&inputTensorShapes),
+ "The shape of the input tensors in the network as a flat array of integers separated by comma. "
+ "Several shapes can be passed separating them by semicolon. "
"This parameter is optional, depending on the network.")
- ("input-tensor-data,d", po::value(&inputTensorDataFilePath),
- "Path to a file containing the input data as a flat array separated by whitespace.")
- ("output-name,o", po::value(&outputName), "Identifier of the output tensor in the network.")
+ ("input-tensor-data,d", po::value(&inputTensorDataFilePaths),
+ "Path to files containing the input data as a flat array separated by whitespace. "
+ "Several paths can be passed separating them by comma. ")
+ ("input-type,y",po::value(&inputTypes), "The type of the input tensors in the network separated by comma. "
+ "If unset, defaults to \"float\" for all defined inputs. "
+ "Accepted values (float or int)")
+ ("output-name,o", po::value(&outputNames),
+ "Identifier of the output tensors in the network separated by comma.")
("event-based-profiling,e", po::bool_switch()->default_value(false),
"Enables built in profiler. If unset, defaults to off.");
}
@@ -632,7 +718,7 @@ int main(int argc, const char* argv[])
return EXIT_FAILURE;
}
- return RunTest(modelFormat, inputTensorShapeStr, computeDevices,
- modelPath, inputName, inputTensorDataFilePath, outputName, enableProfiling, subgraphId);
+ return RunTest(modelFormat, inputTensorShapes, computeDevices, modelPath, inputNames, inputTensorDataFilePaths,
+ inputTypes, outputNames, enableProfiling, subgraphId);
}
}
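Taken together, a multi-input invocation of ExecuteNetwork might look like this (hypothetical DeepSpeech-style layer and file names, shown for illustration only):

    ExecuteNetwork -f tensorflow-binary -m deepspeech.pb -c CpuRef \
        -i input_node,previous_state_c,previous_state_h \
        -s "1,16,19,26;1,2048;1,2048" \
        -d input.txt,state_c.txt,state_h.txt \
        -y float,float,float \
        -o logits,new_state_c,new_state_h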
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 7e338669c7..eb5f708c81 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -24,6 +24,7 @@
#include <boost/program_options.hpp>
#include <boost/filesystem.hpp>
#include <boost/lexical_cast.hpp>
+#include <boost/variant.hpp>
#include <algorithm>
#include <iterator>
@@ -266,13 +267,17 @@ inline armnn::InputTensors MakeInputTensors(
const InferenceModelInternal::BindingPointInfo& inputBinding = inputBindings[i];
const TContainer& inputData = inputDataContainers[i];
- if (inputData.size() != inputBinding.second.GetNumElements())
- {
- throw armnn::Exception("Input tensor has incorrect size");
- }
-
- armnn::ConstTensor inputTensor(inputBinding.second, inputData.data());
- inputTensors.push_back(std::make_pair(inputBinding.first, inputTensor));
+ boost::apply_visitor([&](auto&& value)
+ {
+ if (value.size() != inputBinding.second.GetNumElements())
+ {
+ throw armnn::Exception("Input tensor has incorrect size");
+ }
+
+ armnn::ConstTensor inputTensor(inputBinding.second, value.data());
+ inputTensors.push_back(std::make_pair(inputBinding.first, inputTensor));
+ },
+ inputData);
}
return inputTensors;
@@ -297,13 +302,17 @@ inline armnn::OutputTensors MakeOutputTensors(
const InferenceModelInternal::BindingPointInfo& outputBinding = outputBindings[i];
TContainer& outputData = outputDataContainers[i];
- if (outputData.size() != outputBinding.second.GetNumElements())
- {
- throw armnn::Exception("Output tensor has incorrect size");
- }
-
- armnn::Tensor outputTensor(outputBinding.second, outputData.data());
- outputTensors.push_back(std::make_pair(outputBinding.first, outputTensor));
+ boost::apply_visitor([&](auto&& value)
+ {
+ if (value.size() != outputBinding.second.GetNumElements())
+ {
+ throw armnn::Exception("Output tensor has incorrect size");
+ }
+
+ armnn::Tensor outputTensor(outputBinding.second, value.data());
+ outputTensors.push_back(std::make_pair(outputBinding.first, outputTensor));
+ },
+ outputData);
}
return outputTensors;
@@ -317,7 +326,7 @@ public:
using Params = InferenceModelInternal::Params;
using BindingPointInfo = InferenceModelInternal::BindingPointInfo;
using QuantizationParams = InferenceModelInternal::QuantizationParams;
- using TContainer = std::vector<TDataType>;
+ using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
struct CommandLineOptions
{
@@ -439,16 +448,22 @@ public:
void Run(const std::vector<TContainer>& inputContainers, std::vector<TContainer>& outputContainers)
{
- for (unsigned int i = 0; i < outputContainers.size(); i++)
+ for (unsigned int i = 0; i < outputContainers.size(); ++i)
{
const unsigned int expectedOutputDataSize = GetOutputSize(i);
- const unsigned int actualOutputDataSize = boost::numeric_cast<unsigned int>(outputContainers[i].size());
- if (actualOutputDataSize < expectedOutputDataSize)
+
+ boost::apply_visitor([expectedOutputDataSize, i](auto&& value)
{
- unsigned int outputIndex = boost::numeric_cast<unsigned int>(i);
- throw armnn::Exception(boost::str(boost::format("Not enough data for output #%1%: expected "
- "%2% elements, got %3%") % outputIndex % expectedOutputDataSize % actualOutputDataSize));
- }
+ const unsigned int actualOutputDataSize = boost::numeric_cast<unsigned int>(value.size());
+ if (actualOutputDataSize < expectedOutputDataSize)
+ {
+ unsigned int outputIndex = boost::numeric_cast<unsigned int>(i);
+ throw armnn::Exception(
+ boost::str(boost::format("Not enough data for output #%1%: expected "
+ "%2% elements, got %3%") % outputIndex % expectedOutputDataSize % actualOutputDataSize));
+ }
+ },
+ outputContainers[i]);
}
std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkIdentifier);
@@ -531,4 +546,4 @@ private:
{
return ::MakeOutputTensors(m_OutputBindings, outputDataContainers);
}
-};
\ No newline at end of file
+};
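MakeInputTensors, MakeOutputTensors and Run above all use the same idiom: a C++14 generic lambda passed to boost::apply_visitor, so one body is instantiated for each alternative type the variant can hold. A standalone sketch of the pattern (illustration only; assumes Boost 1.58 or later, which the same construct in the patch already requires):

    #include <boost/variant.hpp>
    #include <iostream>
    #include <vector>

    using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;

    int main()
    {
        std::vector<TContainer> outputs;
        outputs.push_back(std::vector<float>{0.1f, 0.9f}); // e.g. probabilities
        outputs.push_back(std::vector<int>{7});            // e.g. a class index

        for (const TContainer& container : outputs)
        {
            // The lambda body is compiled once per alternative of the variant.
            boost::apply_visitor([](auto&& value)
            {
                for (size_t i = 0; i < value.size(); ++i)
                {
                    std::cout << static_cast<float>(value[i]) << " ";
                }
                std::cout << std::endl;
            },
            container);
        }
    }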
diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp
index 3c22df9a5e..91a65ea494 100644
--- a/tests/InferenceTest.hpp
+++ b/tests/InferenceTest.hpp
@@ -100,7 +100,7 @@ template <typename TModel>
class InferenceModelTestCase : public IInferenceTestCase
{
public:
- using TContainer = std::vector<typename TModel::DataType>;
+ using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
InferenceModelTestCase(TModel& model,
unsigned int testCaseId,
@@ -112,11 +112,11 @@ public:
{
// Initialize output vector
const size_t numOutputs = outputSizes.size();
- m_Outputs.resize(numOutputs);
+ m_Outputs.reserve(numOutputs);
for (size_t i = 0; i < numOutputs; i++)
{
- m_Outputs[i].resize(outputSizes[i]);
+ m_Outputs.push_back(std::vector<typename TModel::DataType>(outputSizes[i]));
}
}
@@ -147,6 +147,12 @@ struct ToFloat<float>
// assuming that float models are not quantized
return value;
}
+
+ static inline float Convert(int value, const InferenceModelInternal::QuantizationParams &)
+ {
+ // assuming that float models are not quantized
+ return static_cast<float>(value);
+ }
};
template <>
@@ -159,6 +165,22 @@ struct ToFloat<uint8_t>
quantizationParams.first,
quantizationParams.second);
}
+
+ static inline float Convert(int value,
+ const InferenceModelInternal::QuantizationParams & quantizationParams)
+ {
+ return armnn::Dequantize<uint8_t>(static_cast<uint8_t>(value),
+ quantizationParams.first,
+ quantizationParams.second);
+ }
+
+ static inline float Convert(float value,
+ const InferenceModelInternal::QuantizationParams & quantizationParams)
+ {
+ return armnn::Dequantize<uint8_t>(static_cast<uint8_t>(value),
+ quantizationParams.first,
+ quantizationParams.second);
+ }
};
template <typename TTestCaseDatabase, typename TModel>
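The new Convert overloads for quantized models delegate to armnn::Dequantize, which maps a stored value back to float as scale * (value - offset). A small worked sketch (illustration only; assumes armnn/TypesUtils.hpp):

    #include <armnn/TypesUtils.hpp>
    #include <cstdint>
    #include <iostream>

    int main()
    {
        const float scale = 0.1f;
        const int32_t offset = 128;
        // 0.1f * (138 - 128) = 1.0f
        std::cout << armnn::Dequantize<uint8_t>(138, scale, offset) << std::endl;
    }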
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 07a20d5a13..538720bd83 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -28,6 +28,7 @@ namespace armnn
namespace test
{
+using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
template <typename TTestCaseDatabase, typename TModel>
ClassifierTestCase<TTestCaseDatabase, TModel>::ClassifierTestCase(
@@ -39,7 +40,8 @@ ClassifierTestCase<TTestCaseDatabase, TModel>::ClassifierTestCase(
unsigned int testCaseId,
unsigned int label,
std::vector<typename TModel::DataType> modelInput)
- : InferenceModelTestCase<TModel>(model, testCaseId, { std::move(modelInput) }, { model.GetOutputSize() })
+ : InferenceModelTestCase<TModel>(
+ model, testCaseId, std::vector<TContainer>{ modelInput }, { model.GetOutputSize() })
, m_Label(label)
, m_QuantizationParams(model.GetQuantizationParams())
, m_NumInferencesRef(numInferencesRef)
@@ -58,21 +60,26 @@ TestCaseResult ClassifierTestCase<TTestCaseDatabase, TModel>::ProcessResult(cons
std::map<float,int> resultMap;
{
int index = 0;
- for (const auto & o : output)
- {
- float prob = ToFloat<typename TModel::DataType>::Convert(o, m_QuantizationParams);
- int classification = index++;
-
- // Take the first class with each probability
- // This avoids strange results when looping over batched results produced
- // with identical test data.
- std::map<float, int>::iterator lb = resultMap.lower_bound(prob);
- if (lb == resultMap.end() ||
- !resultMap.key_comp()(prob, lb->first)) {
- // If the key is not already in the map, insert it.
- resultMap.insert(lb, std::map<float, int>::value_type(prob, classification));
- }
- }
+
+ boost::apply_visitor([&](auto&& value)
+ {
+ for (const auto & o : value)
+ {
+ float prob = ToFloat<typename TModel::DataType>::Convert(o, m_QuantizationParams);
+ int classification = index++;
+
+ // Take the first class with each probability
+ // This avoids strange results when looping over batched results produced
+ // with identical test data.
+ std::map<float, int>::iterator lb = resultMap.lower_bound(prob);
+ if (lb == resultMap.end() ||
+ !resultMap.key_comp()(prob, lb->first)) {
+ // If the key is not already in the map, insert it.
+ resultMap.insert(lb, std::map<float, int>::value_type(prob, classification));
+ }
+ }
+ },
+ output);
}
{
@@ -86,8 +93,13 @@ TestCaseResult ClassifierTestCase<TTestCaseDatabase, TModel>::ProcessResult(cons
}
}
- const unsigned int prediction = boost::numeric_cast<unsigned int>(
- std::distance(output.begin(), std::max_element(output.begin(), output.end())));
+ unsigned int prediction = 0;
+ boost::apply_visitor([&](auto&& value)
+ {
+ prediction = boost::numeric_cast<unsigned int>(
+ std::distance(value.begin(), std::max_element(value.begin(), value.end())));
+ },
+ output);
// If we're just running the defaultTestCaseIds, each one must be classified correctly.
if (params.m_IterationCount == 0 && prediction != m_Label)
diff --git a/tests/MobileNetSsdInferenceTest.hpp b/tests/MobileNetSsdInferenceTest.hpp
index cf00966e4b..0091009083 100644
--- a/tests/MobileNetSsdInferenceTest.hpp
+++ b/tests/MobileNetSsdInferenceTest.hpp
@@ -34,16 +34,16 @@ public:
TestCaseResult ProcessResult(const InferenceTestOptions& options) override
{
- const std::vector<float>& output1 = this->GetOutputs()[0]; // bounding boxes
+ const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // bounding boxes
BOOST_ASSERT(output1.size() == k_OutputSize1);
- const std::vector<float>& output2 = this->GetOutputs()[1]; // classes
+ const std::vector<float>& output2 = boost::get<std::vector<float>>(this->GetOutputs()[1]); // classes
BOOST_ASSERT(output2.size() == k_OutputSize2);
- const std::vector<float>& output3 = this->GetOutputs()[2]; // scores
+ const std::vector<float>& output3 = boost::get<std::vector<float>>(this->GetOutputs()[2]); // scores
BOOST_ASSERT(output3.size() == k_OutputSize3);
- const std::vector<float>& output4 = this->GetOutputs()[3]; // number of valid detections
+ const std::vector<float>& output4 = boost::get<std::vector<float>>(this->GetOutputs()[3]); // valid detections
BOOST_ASSERT(output4.size() == k_OutputSize4);
// Extract detected objects from output data
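boost::get<T>, as used above, extracts a reference to the concrete vector held by the variant and throws boost::bad_get if a different alternative is currently held. A standalone sketch (illustration only; assumes Boost.Variant only):

    #include <boost/variant.hpp>
    #include <iostream>
    #include <vector>

    using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;

    int main()
    {
        TContainer boxes = std::vector<float>{0.1f, 0.2f, 0.8f, 0.9f};

        // Succeeds: the variant currently holds std::vector<float>.
        const std::vector<float>& asFloat = boost::get<std::vector<float>>(boxes);
        std::cout << asFloat.size() << std::endl;

        try
        {
            boost::get<std::vector<int>>(boxes); // wrong alternative
        }
        catch (const boost::bad_get& e)
        {
            std::cout << e.what() << std::endl;
        }
    }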
diff --git a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
index 44b8890fc2..2bbfb69c8d 100644
--- a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
+++ b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
@@ -168,28 +168,34 @@ int main(int argc, char* argv[])
}
Cifar10Database cifar10(dataDir);
+ using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+
for (unsigned int i = 0; i < 3; ++i)
{
// Loads test case data (including image data).
std::unique_ptr<Cifar10Database::TTestCaseData> testCaseData = cifar10.GetTestCaseData(i);
- using TInputContainer = std::vector<float>;
- using TOutputContainer = std::array<float, 10>;
-
// Tests inference.
- std::vector<TOutputContainer> outputs(networksCount);
+ std::vector<TContainer> outputs;
+ outputs.reserve(networksCount);
+
+ for (unsigned int j = 0; j < networksCount; ++j)
+ {
+ outputs.push_back(std::vector<float>(10));
+ }
+
for (unsigned int k = 0; k < networksCount; ++k)
{
using BindingPointInfo = InferenceModelInternal::BindingPointInfo;
std::vector<BindingPointInfo> inputBindings = { networks[k].m_InputBindingInfo };
std::vector<BindingPointInfo> outputBindings = { networks[k].m_OutputBindingInfo };
- std::vector<TInputContainer> inputData = { testCaseData->m_InputImage };
- std::vector<TOutputContainer> outputData = { outputs[k] };
+ std::vector<TContainer> inputDataContainers = { testCaseData->m_InputImage };
+ std::vector<TContainer> outputDataContainers = { outputs[k] };
status = runtime->EnqueueWorkload(networks[k].m_Network,
- MakeInputTensors(inputBindings, inputData),
- MakeOutputTensors(outputBindings, outputData));
+ MakeInputTensors(inputBindings, inputDataContainers),
+ MakeOutputTensors(outputBindings, outputDataContainers));
if (status == armnn::Status::Failure)
{
BOOST_LOG_TRIVIAL(fatal) << "armnn::IRuntime: Failed to enqueue workload";
@@ -198,9 +204,13 @@ int main(int argc, char* argv[])
}
// Compares outputs.
+ std::vector<float> output0 = boost::get<std::vector<float>>(outputs[0]);
+
for (unsigned int k = 1; k < networksCount; ++k)
{
- if (!std::equal(outputs[0].begin(), outputs[0].end(), outputs[k].begin(), outputs[k].end()))
+ std::vector<float> outputK = boost::get<std::vector<float>>(outputs[k]);
+
+ if (!std::equal(output0.begin(), output0.end(), outputK.begin(), outputK.end()))
{
BOOST_LOG_TRIVIAL(error) << "Multiple networks inference failed!";
return 1;
diff --git a/tests/YoloInferenceTest.hpp b/tests/YoloInferenceTest.hpp
index 98a9d2f106..922bcb8582 100644
--- a/tests/YoloInferenceTest.hpp
+++ b/tests/YoloInferenceTest.hpp
@@ -34,7 +34,7 @@ public:
{
using Boost3dArray = boost::multi_array<float, 3>;
- const std::vector<float>& output = this->GetOutputs()[0];
+ const std::vector<float>& output = boost::get<std::vector<float>>(this->GetOutputs()[0]);
BOOST_ASSERT(output.size() == YoloOutputSize);
constexpr Boost3dArray::index gridSize = 7;