From bee4bc944aa50782ff22cb4a31fbc611212a5e89 Mon Sep 17 00:00:00 2001 From: Francis Murtagh Date: Tue, 18 Jun 2019 12:30:37 +0100 Subject: IVGCVSW-3299 Add Uint8 Support to Model Accuracy Checker * Seperate ExecuteNetwork main function into standalone application * Include NetworkExecutionUtils header and remove duplicate functions * Add uint8 and int32 support to ModelAccuracyChecker Change-Id: I5fb4bc147232f8388f37eea7db5130b04fd215d1 Signed-off-by: Francis Murtagh --- tests/CMakeLists.txt | 15 + tests/ExecuteNetwork/ExecuteNetwork.cpp | 672 +------------------- .../ModelAccuracyTool-Armnn.cpp | 108 ++-- .../NetworkExecutionUtils.hpp | 676 +++++++++++++++++++++ 4 files changed, 735 insertions(+), 736 deletions(-) create mode 100644 tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index dfcf4b48e0..19612543b6 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -294,6 +294,21 @@ endif() if(BUILD_ACCURACY_TOOL) macro(AccuracyTool executorName) target_link_libraries(${executorName} ${CMAKE_THREAD_LIBS_INIT}) + if (BUILD_ARMNN_SERIALIZER) + target_link_libraries(${executorName} armnnSerializer) + endif() + if (BUILD_CAFFE_PARSER) + target_link_libraries(${executorName} armnnCaffeParser) + endif() + if (BUILD_TF_PARSER) + target_link_libraries(${executorName} armnnTfParser) + endif() + if (BUILD_TF_LITE_PARSER) + target_link_libraries(${executorName} armnnTfLiteParser) + endif() + if (BUILD_ONNX_PARSER) + target_link_libraries(${executorName} armnnOnnxParser) + endif() if(OPENCL_LIBRARIES) target_link_libraries(${executorName} ${OPENCL_LIBRARIES}) endif() diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp index 8ca69d8292..60353dbf3f 100644 --- a/tests/ExecuteNetwork/ExecuteNetwork.cpp +++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp @@ -2,678 +2,8 @@ // Copyright © 2017 Arm Ltd. All rights reserved. // SPDX-License-Identifier: MIT // -#include -#include -#if defined(ARMNN_SERIALIZER) -#include "armnnDeserializer/IDeserializer.hpp" -#endif -#if defined(ARMNN_CAFFE_PARSER) -#include "armnnCaffeParser/ICaffeParser.hpp" -#endif -#if defined(ARMNN_TF_PARSER) -#include "armnnTfParser/ITfParser.hpp" -#endif -#if defined(ARMNN_TF_LITE_PARSER) -#include "armnnTfLiteParser/ITfLiteParser.hpp" -#endif -#if defined(ARMNN_ONNX_PARSER) -#include "armnnOnnxParser/IOnnxParser.hpp" -#endif -#include "CsvReader.hpp" -#include "../InferenceTest.hpp" - -#include -#include - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -namespace -{ - -// Configure boost::program_options for command-line parsing and validation. -namespace po = boost::program_options; - -template -std::vector ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc, const char * chars = "\t ,:") -{ - std::vector result; - // Processes line-by-line. - std::string line; - while (std::getline(stream, line)) - { - std::vector tokens; - try - { - // Coverity fix: boost::split() may throw an exception of type boost::bad_function_call. 
- boost::split(tokens, line, boost::algorithm::is_any_of(chars), boost::token_compress_on); - } - catch (const std::exception& e) - { - BOOST_LOG_TRIVIAL(error) << "An error occurred when splitting tokens: " << e.what(); - continue; - } - for (const std::string& token : tokens) - { - if (!token.empty()) // See https://stackoverflow.com/questions/10437406/ - { - try - { - result.push_back(parseElementFunc(token)); - } - catch (const std::exception&) - { - BOOST_LOG_TRIVIAL(error) << "'" << token << "' is not a valid number. It has been ignored."; - } - } - } - } - - return result; -} - -bool CheckOption(const po::variables_map& vm, - const char* option) -{ - // Check that the given option is valid. - if (option == nullptr) - { - return false; - } - - // Check whether 'option' is provided. - return vm.find(option) != vm.end(); -} - -void CheckOptionDependency(const po::variables_map& vm, - const char* option, - const char* required) -{ - // Check that the given options are valid. - if (option == nullptr || required == nullptr) - { - throw po::error("Invalid option to check dependency for"); - } - - // Check that if 'option' is provided, 'required' is also provided. - if (CheckOption(vm, option) && !vm[option].defaulted()) - { - if (CheckOption(vm, required) == 0 || vm[required].defaulted()) - { - throw po::error(std::string("Option '") + option + "' requires option '" + required + "'."); - } - } -} - -void CheckOptionDependencies(const po::variables_map& vm) -{ - CheckOptionDependency(vm, "model-path", "model-format"); - CheckOptionDependency(vm, "model-path", "input-name"); - CheckOptionDependency(vm, "model-path", "input-tensor-data"); - CheckOptionDependency(vm, "model-path", "output-name"); - CheckOptionDependency(vm, "input-tensor-shape", "model-path"); -} - -template -auto ParseDataArray(std::istream & stream); - -template -auto ParseDataArray(std::istream& stream, - const float& quantizationScale, - const int32_t& quantizationOffset); - -template<> -auto ParseDataArray(std::istream & stream) -{ - return ParseArrayImpl(stream, [](const std::string& s) { return std::stof(s); }); -} - -template<> -auto ParseDataArray(std::istream & stream) -{ - return ParseArrayImpl(stream, [](const std::string & s) { return std::stoi(s); }); -} - -template<> -auto ParseDataArray(std::istream& stream, - const float& quantizationScale, - const int32_t& quantizationOffset) -{ - return ParseArrayImpl(stream, - [&quantizationScale, &quantizationOffset](const std::string & s) - { - return boost::numeric_cast( - armnn::Quantize(std::stof(s), - quantizationScale, - quantizationOffset)); - }); -} - -std::vector ParseArray(std::istream& stream) -{ - return ParseArrayImpl(stream, - [](const std::string& s) { return boost::numeric_cast(std::stoi(s)); }); -} - -std::vector ParseStringList(const std::string & inputString, const char * delimiter) -{ - std::stringstream stream(inputString); - return ParseArrayImpl(stream, [](const std::string& s) { return boost::trim_copy(s); }, delimiter); -} - -void RemoveDuplicateDevices(std::vector& computeDevices) -{ - // Mark the duplicate devices as 'Undefined'. - for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i) - { - for (auto j = std::next(i); j != computeDevices.end(); ++j) - { - if (*j == *i) - { - *j = armnn::Compute::Undefined; - } - } - } - - // Remove 'Undefined' devices. 
- computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined), - computeDevices.end()); -} - -struct TensorPrinter : public boost::static_visitor<> -{ - TensorPrinter(const std::string& binding, const armnn::TensorInfo& info) - : m_OutputBinding(binding) - , m_Scale(info.GetQuantizationScale()) - , m_Offset(info.GetQuantizationOffset()) - {} - - void operator()(const std::vector& values) - { - ForEachValue(values, [](float value){ - printf("%f ", value); - }); - } - - void operator()(const std::vector& values) - { - auto& scale = m_Scale; - auto& offset = m_Offset; - ForEachValue(values, [&scale, &offset](uint8_t value) - { - printf("%f ", armnn::Dequantize(value, scale, offset)); - }); - } - - void operator()(const std::vector& values) - { - ForEachValue(values, [](int value) - { - printf("%d ", value); - }); - } - -private: - template - void ForEachValue(const Container& c, Delegate delegate) - { - std::cout << m_OutputBinding << ": "; - for (const auto& value : c) - { - delegate(value); - } - printf("\n"); - } - - std::string m_OutputBinding; - float m_Scale=0.0f; - int m_Offset=0; -}; - - -} // namespace - -template -int MainImpl(const char* modelPath, - bool isModelBinary, - const std::vector& computeDevices, - const std::vector& inputNames, - const std::vector>& inputTensorShapes, - const std::vector& inputTensorDataFilePaths, - const std::vector& inputTypes, - const std::vector& outputTypes, - const std::vector& outputNames, - bool enableProfiling, - bool enableFp16TurboMode, - const double& thresholdTime, - const size_t subgraphId, - const std::shared_ptr& runtime = nullptr) -{ - using TContainer = boost::variant, std::vector, std::vector>; - - std::vector inputDataContainers; - - try - { - // Creates an InferenceModel, which will parse the model and load it into an IRuntime. - typename InferenceModel::Params params; - params.m_ModelPath = modelPath; - params.m_IsModelBinary = isModelBinary; - params.m_ComputeDevices = computeDevices; - - for(const std::string& inputName: inputNames) - { - params.m_InputBindings.push_back(inputName); - } - - for(unsigned int i = 0; i < inputTensorShapes.size(); ++i) - { - params.m_InputShapes.push_back(*inputTensorShapes[i]); - } - - for(const std::string& outputName: outputNames) - { - params.m_OutputBindings.push_back(outputName); - } - - params.m_SubgraphId = subgraphId; - params.m_EnableFp16TurboMode = enableFp16TurboMode; - InferenceModel model(params, enableProfiling, runtime); - - for(unsigned int i = 0; i < inputTensorDataFilePaths.size(); ++i) - { - std::ifstream inputTensorFile(inputTensorDataFilePaths[i]); - - if (inputTypes[i].compare("float") == 0) - { - inputDataContainers.push_back( - ParseDataArray(inputTensorFile)); - } - else if (inputTypes[i].compare("int") == 0) - { - inputDataContainers.push_back( - ParseDataArray(inputTensorFile)); - } - else if (inputTypes[i].compare("qasymm8") == 0) - { - auto inputBinding = model.GetInputBindingInfo(); - inputDataContainers.push_back( - ParseDataArray(inputTensorFile, - inputBinding.second.GetQuantizationScale(), - inputBinding.second.GetQuantizationOffset())); - } - else - { - BOOST_LOG_TRIVIAL(fatal) << "Unsupported tensor data type \"" << inputTypes[i] << "\". 
"; - return EXIT_FAILURE; - } - - inputTensorFile.close(); - } - - const size_t numOutputs = params.m_OutputBindings.size(); - std::vector outputDataContainers; - - for (unsigned int i = 0; i < numOutputs; ++i) - { - if (outputTypes[i].compare("float") == 0) - { - outputDataContainers.push_back(std::vector(model.GetOutputSize(i))); - } - else if (outputTypes[i].compare("int") == 0) - { - outputDataContainers.push_back(std::vector(model.GetOutputSize(i))); - } - else if (outputTypes[i].compare("qasymm8") == 0) - { - outputDataContainers.push_back(std::vector(model.GetOutputSize(i))); - } - else - { - BOOST_LOG_TRIVIAL(fatal) << "Unsupported tensor data type \"" << outputTypes[i] << "\". "; - return EXIT_FAILURE; - } - } - - // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds) - auto inference_duration = model.Run(inputDataContainers, outputDataContainers); - - // Print output tensors - const auto& infosOut = model.GetOutputBindingInfos(); - for (size_t i = 0; i < numOutputs; i++) - { - const armnn::TensorInfo& infoOut = infosOut[i].second; - TensorPrinter printer(params.m_OutputBindings[i], infoOut); - boost::apply_visitor(printer, outputDataContainers[i]); - } - - BOOST_LOG_TRIVIAL(info) << "\nInference time: " << std::setprecision(2) - << std::fixed << inference_duration.count() << " ms"; - - // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line - if (thresholdTime != 0.0) - { - BOOST_LOG_TRIVIAL(info) << "Threshold time: " << std::setprecision(2) - << std::fixed << thresholdTime << " ms"; - auto thresholdMinusInference = thresholdTime - inference_duration.count(); - BOOST_LOG_TRIVIAL(info) << "Threshold time - Inference time: " << std::setprecision(2) - << std::fixed << thresholdMinusInference << " ms" << "\n"; - - if (thresholdMinusInference < 0) - { - BOOST_LOG_TRIVIAL(fatal) << "Elapsed inference time is greater than provided threshold time.\n"; - return EXIT_FAILURE; - } - } - - - } - catch (armnn::Exception const& e) - { - BOOST_LOG_TRIVIAL(fatal) << "Armnn Error: " << e.what(); - return EXIT_FAILURE; - } - - return EXIT_SUCCESS; -} - -// This will run a test -int RunTest(const std::string& format, - const std::string& inputTensorShapesStr, - const vector& computeDevice, - const std::string& path, - const std::string& inputNames, - const std::string& inputTensorDataFilePaths, - const std::string& inputTypes, - const std::string& outputTypes, - const std::string& outputNames, - bool enableProfiling, - bool enableFp16TurboMode, - const double& thresholdTime, - const size_t subgraphId, - const std::shared_ptr& runtime = nullptr) -{ - std::string modelFormat = boost::trim_copy(format); - std::string modelPath = boost::trim_copy(path); - std::vector inputNamesVector = ParseStringList(inputNames, ","); - std::vector inputTensorShapesVector = ParseStringList(inputTensorShapesStr, ";"); - std::vector inputTensorDataFilePathsVector = ParseStringList( - inputTensorDataFilePaths, ","); - std::vector outputNamesVector = ParseStringList(outputNames, ","); - std::vector inputTypesVector = ParseStringList(inputTypes, ","); - std::vector outputTypesVector = ParseStringList(outputTypes, ","); - - // Parse model binary flag from the model-format string we got from the command-line - bool isModelBinary; - if (modelFormat.find("bin") != std::string::npos) - { - isModelBinary = true; - } - else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos) - { - isModelBinary = false; - } - else - { - 
BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << "'. Please include 'binary' or 'text'"; - return EXIT_FAILURE; - } - - if ((inputTensorShapesVector.size() != 0) && (inputTensorShapesVector.size() != inputNamesVector.size())) - { - BOOST_LOG_TRIVIAL(fatal) << "input-name and input-tensor-shape must have the same amount of elements."; - return EXIT_FAILURE; - } - - if ((inputTensorDataFilePathsVector.size() != 0) && - (inputTensorDataFilePathsVector.size() != inputNamesVector.size())) - { - BOOST_LOG_TRIVIAL(fatal) << "input-name and input-tensor-data must have the same amount of elements."; - return EXIT_FAILURE; - } - - if (inputTypesVector.size() == 0) - { - //Defaults the value of all inputs to "float" - inputTypesVector.assign(inputNamesVector.size(), "float"); - } - if (outputTypesVector.size() == 0) - { - //Defaults the value of all outputs to "float" - outputTypesVector.assign(outputNamesVector.size(), "float"); - } - else if ((inputTypesVector.size() != 0) && (inputTypesVector.size() != inputNamesVector.size())) - { - BOOST_LOG_TRIVIAL(fatal) << "input-name and input-type must have the same amount of elements."; - return EXIT_FAILURE; - } - - // Parse input tensor shape from the string we got from the command-line. - std::vector> inputTensorShapes; - - if (!inputTensorShapesVector.empty()) - { - inputTensorShapes.reserve(inputTensorShapesVector.size()); - - for(const std::string& shape : inputTensorShapesVector) - { - std::stringstream ss(shape); - std::vector dims = ParseArray(ss); - - try - { - // Coverity fix: An exception of type armnn::InvalidArgumentException is thrown and never caught. - inputTensorShapes.push_back(std::make_unique(dims.size(), dims.data())); - } - catch (const armnn::InvalidArgumentException& e) - { - BOOST_LOG_TRIVIAL(fatal) << "Cannot create tensor shape: " << e.what(); - return EXIT_FAILURE; - } - } - } - - // Check that threshold time is not less than zero - if (thresholdTime < 0) - { - BOOST_LOG_TRIVIAL(fatal) << "Threshold time supplied as a commoand line argument is less than zero."; - return EXIT_FAILURE; - } - - // Forward to implementation based on the parser type - if (modelFormat.find("armnn") != std::string::npos) - { -#if defined(ARMNN_SERIALIZER) - return MainImpl( - modelPath.c_str(), isModelBinary, computeDevice, - inputNamesVector, inputTensorShapes, - inputTensorDataFilePathsVector, inputTypesVector, - outputTypesVector, outputNamesVector, enableProfiling, - enableFp16TurboMode, thresholdTime, subgraphId, runtime); -#else - BOOST_LOG_TRIVIAL(fatal) << "Not built with serialization support."; - return EXIT_FAILURE; -#endif - } - else if (modelFormat.find("caffe") != std::string::npos) - { -#if defined(ARMNN_CAFFE_PARSER) - return MainImpl(modelPath.c_str(), isModelBinary, computeDevice, - inputNamesVector, inputTensorShapes, - inputTensorDataFilePathsVector, inputTypesVector, - outputTypesVector, outputNamesVector, enableProfiling, - enableFp16TurboMode, thresholdTime, subgraphId, runtime); -#else - BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support."; - return EXIT_FAILURE; -#endif - } - else if (modelFormat.find("onnx") != std::string::npos) -{ -#if defined(ARMNN_ONNX_PARSER) - return MainImpl(modelPath.c_str(), isModelBinary, computeDevice, - inputNamesVector, inputTensorShapes, - inputTensorDataFilePathsVector, inputTypesVector, - outputTypesVector, outputNamesVector, enableProfiling, - enableFp16TurboMode, thresholdTime, subgraphId, runtime); -#else - BOOST_LOG_TRIVIAL(fatal) << "Not built with 
Onnx parser support."; - return EXIT_FAILURE; -#endif - } - else if (modelFormat.find("tensorflow") != std::string::npos) - { -#if defined(ARMNN_TF_PARSER) - return MainImpl(modelPath.c_str(), isModelBinary, computeDevice, - inputNamesVector, inputTensorShapes, - inputTensorDataFilePathsVector, inputTypesVector, - outputTypesVector, outputNamesVector, enableProfiling, - enableFp16TurboMode, thresholdTime, subgraphId, runtime); -#else - BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support."; - return EXIT_FAILURE; -#endif - } - else if(modelFormat.find("tflite") != std::string::npos) - { -#if defined(ARMNN_TF_LITE_PARSER) - if (! isModelBinary) - { - BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << "'. Only 'binary' format supported \ - for tflite files"; - return EXIT_FAILURE; - } - return MainImpl(modelPath.c_str(), isModelBinary, computeDevice, - inputNamesVector, inputTensorShapes, - inputTensorDataFilePathsVector, inputTypesVector, - outputTypesVector, outputNamesVector, enableProfiling, - enableFp16TurboMode, thresholdTime, subgraphId, - runtime); -#else - BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << - "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'"; - return EXIT_FAILURE; -#endif - } - else - { - BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << - "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'"; - return EXIT_FAILURE; - } -} - -int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr& runtime, - const bool enableProfiling, const bool enableFp16TurboMode, const double& thresholdTime) -{ - std::string modelFormat; - std::string modelPath; - std::string inputNames; - std::string inputTensorShapes; - std::string inputTensorDataFilePaths; - std::string outputNames; - std::string inputTypes; - std::string outputTypes; - - size_t subgraphId = 0; - - const std::string backendsMessage = std::string("The preferred order of devices to run layers on by default. ") - + std::string("Possible choices: ") - + armnn::BackendRegistryInstance().GetBackendIdsAsString(); - - po::options_description desc("Options"); - try - { - desc.add_options() - ("model-format,f", po::value(&modelFormat), - "armnn-binary, caffe-binary, caffe-text, tflite-binary, onnx-binary, onnx-text, tensorflow-binary or " - "tensorflow-text.") - ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .armnn, .caffemodel, .prototxt, " - ".tflite, .onnx") - ("compute,c", po::value>()->multitoken(), - backendsMessage.c_str()) - ("input-name,i", po::value(&inputNames), "Identifier of the input tensors in the network separated by comma.") - ("subgraph-number,n", po::value(&subgraphId)->default_value(0), "Id of the subgraph to be " - "executed. Defaults to 0.") - ("input-tensor-shape,s", po::value(&inputTensorShapes), - "The shape of the input tensors in the network as a flat array of integers separated by comma. " - "Several shapes can be passed separating them by semicolon. " - "This parameter is optional, depending on the network.") - ("input-tensor-data,d", po::value(&inputTensorDataFilePaths), - "Path to files containing the input data as a flat array separated by whitespace. " - "Several paths can be passed separating them by comma.") - ("input-type,y",po::value(&inputTypes), "The type of the input tensors in the network separated by comma. " - "If unset, defaults to \"float\" for all defined inputs. 
" - "Accepted values (float, int or qasymm8).") - ("output-type,z",po::value(&outputTypes), "The type of the output tensors in the network separated by comma. " - "If unset, defaults to \"float\" for all defined outputs. " - "Accepted values (float, int or qasymm8).") - ("output-name,o", po::value(&outputNames), - "Identifier of the output tensors in the network separated by comma."); - } - catch (const std::exception& e) - { - // Coverity points out that default_value(...) can throw a bad_lexical_cast, - // and that desc.add_options() can throw boost::io::too_few_args. - // They really won't in any of these cases. - BOOST_ASSERT_MSG(false, "Caught unexpected exception"); - BOOST_LOG_TRIVIAL(fatal) << "Fatal internal error: " << e.what(); - return EXIT_FAILURE; - } - - std::vector clOptions; - clOptions.reserve(csvRow.values.size()); - for (const std::string& value : csvRow.values) - { - clOptions.push_back(value.c_str()); - } - - po::variables_map vm; - try - { - po::store(po::parse_command_line(static_cast(clOptions.size()), clOptions.data(), desc), vm); - - po::notify(vm); - - CheckOptionDependencies(vm); - } - catch (const po::error& e) - { - std::cerr << e.what() << std::endl << std::endl; - std::cerr << desc << std::endl; - return EXIT_FAILURE; - } - - // Get the preferred order of compute devices. - std::vector computeDevices = vm["compute"].as>(); - - // Remove duplicates from the list of compute devices. - RemoveDuplicateDevices(computeDevices); - - // Check that the specified compute devices are valid. - std::string invalidBackends; - if (!CheckRequestedBackendsAreValid(computeDevices, armnn::Optional(invalidBackends))) - { - BOOST_LOG_TRIVIAL(fatal) << "The list of preferred devices contains invalid backend IDs: " - << invalidBackends; - return EXIT_FAILURE; - } - - return RunTest(modelFormat, inputTensorShapes, computeDevices, modelPath, inputNames, - inputTensorDataFilePaths, inputTypes, outputTypes, outputNames, - enableProfiling, enableFp16TurboMode, thresholdTime, subgraphId); -} +#include "../NetworkExecutionUtils/NetworkExecutionUtils.hpp" // MAIN int main(int argc, const char* argv[]) diff --git a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp index 3abfb3c2ec..aec4d70271 100644 --- a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp +++ b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp @@ -4,9 +4,9 @@ // #include "ModelAccuracyChecker.hpp" -#include "../InferenceTest.hpp" #include "../ImagePreprocessor.hpp" #include "armnnDeserializer/IDeserializer.hpp" +#include "../NetworkExecutionUtils/NetworkExecutionUtils.hpp" #include #include @@ -14,70 +14,8 @@ using namespace armnn::test; -namespace po = boost::program_options; - -bool CheckOption(const po::variables_map& vm, - const char* option) -{ - // Check that the given option is valid. - if (option == nullptr) - { - return false; - } - - // Check whether 'option' is provided. - return vm.find(option) != vm.end(); -} - -template -std::vector ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc, const char * chars = "\t ,:") -{ - std::vector result; - // Processes line-by-line. - std::string line; - while (std::getline(stream, line)) - { - std::vector tokens; - try - { - // Coverity fix: boost::split() may throw an exception of type boost::bad_function_call. 
- boost::split(tokens, line, boost::algorithm::is_any_of(chars), boost::token_compress_on);
- }
- catch (const std::exception& e)
- {
- BOOST_LOG_TRIVIAL(error) << "An error occurred when splitting tokens: " << e.what();
- continue;
- }
- for (const std::string& token : tokens)
- {
- if (!token.empty()) // See https://stackoverflow.com/questions/10437406/
- {
- try
- {
- result.push_back(parseElementFunc(token));
- }
- catch (const std::exception&)
- {
- BOOST_LOG_TRIVIAL(error) << "'" << token << "' is not a valid number. It has been ignored.";
- }
- }
- }
- }
-
- return result;
-}
-
 map<std::string, std::string> LoadValidationLabels(const string & validationLabelPath);
 
-template<armnn::DataType NonQuantizedType>
-auto ParseDataArray(std::istream & stream);
-
-template<>
-auto ParseDataArray<armnn::DataType::Float32>(std::istream & stream)
-{
- return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); });
-}
-
 int main(int argc, char* argv[])
 {
 try
@@ -94,6 +32,7 @@ int main(int argc, char* argv[])
 std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc, armnn::Compute::CpuRef};
 std::string modelPath;
 std::string dataDir;
+ std::string inputType = "float";
 std::string inputName;
 std::string outputName;
 std::string validationLabelPath;
@@ -112,6 +51,9 @@
 backendsMessage.c_str())
 ("data-dir,d", po::value<std::string>(&dataDir)->required(),
 "Path to directory containing the ImageNet test data")
+ ("input-type,y", po::value<std::string>(&inputType), "The data type of the input tensors. "
+ "If unset, defaults to \"float\" for all defined inputs. "
+ "Accepted values (float, int or qasymm8)")
 ("input-name,i", po::value<std::string>(&inputName)->required(),
 "Identifier of the input tensors in the network separated by comma.")
 ("output-name,o", po::value<std::string>(&outputName)->required(),
@@ -217,14 +159,50 @@
 if(ValidateDirectory(dataDir))
 {
+ InferenceModel<armnnDeserializer::IDeserializer, float>::Params params;
+ params.m_ModelPath = modelPath;
+ params.m_IsModelBinary = true;
+ params.m_ComputeDevices = computeDevice;
+ params.m_InputBindings.push_back(inputName);
+ params.m_OutputBindings.push_back(outputName);
+
+ using TParser = armnnDeserializer::IDeserializer;
+ InferenceModel<TParser, float> model(params, false);
 for (auto & imageEntry : boost::make_iterator_range(directory_iterator(pathToDataDir), {}))
 {
 cout << "Processing image: " << imageEntry << "\n";
 
 std::ifstream inputTensorFile(imageEntry.path().string());
 vector<TContainer> inputDataContainers;
- inputDataContainers.push_back(ParseDataArray<armnn::DataType::Float32>(inputTensorFile));
- vector<TContainer> outputDataContainers = {vector<float>(1001)};
+ vector<TContainer> outputDataContainers;
+
+ if (inputType.compare("float") == 0)
+ {
+ inputDataContainers.push_back(
+ ParseDataArray<armnn::DataType::Float32>(inputTensorFile));
+ outputDataContainers = {vector<float>(1001)};
+ }
+ else if (inputType.compare("int") == 0)
+ {
+ inputDataContainers.push_back(
+ ParseDataArray<armnn::DataType::Signed32>(inputTensorFile));
+ outputDataContainers = {vector<int>(1001)};
+ }
+ else if (inputType.compare("qasymm8") == 0)
+ {
+ auto inputBinding = model.GetInputBindingInfo();
+ inputDataContainers.push_back(
+ ParseDataArray<armnn::DataType::QuantisedAsymm8>(
+ inputTensorFile,
+ inputBinding.second.GetQuantizationScale(),
+ inputBinding.second.GetQuantizationOffset()));
+ outputDataContainers = {vector<uint8_t>(1001)};
+ }
+ else
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "Unsupported tensor data type \"" << inputType << "\". 
"; + return EXIT_FAILURE; + } status = runtime->EnqueueWorkload(networkId, armnnUtils::MakeInputTensors(inputBindings, inputDataContainers), diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp new file mode 100644 index 0000000000..9d7e368dad --- /dev/null +++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp @@ -0,0 +1,676 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// +#include +#include + +#if defined(ARMNN_SERIALIZER) +#include "armnnDeserializer/IDeserializer.hpp" +#endif +#if defined(ARMNN_CAFFE_PARSER) +#include "armnnCaffeParser/ICaffeParser.hpp" +#endif +#if defined(ARMNN_TF_PARSER) +#include "armnnTfParser/ITfParser.hpp" +#endif +#if defined(ARMNN_TF_LITE_PARSER) +#include "armnnTfLiteParser/ITfLiteParser.hpp" +#endif +#if defined(ARMNN_ONNX_PARSER) +#include "armnnOnnxParser/IOnnxParser.hpp" +#endif +#include "CsvReader.hpp" +#include "../InferenceTest.hpp" + +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace +{ + +// Configure boost::program_options for command-line parsing and validation. +namespace po = boost::program_options; + +template +std::vector ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc, const char * chars = "\t ,:") +{ + std::vector result; + // Processes line-by-line. + std::string line; + while (std::getline(stream, line)) + { + std::vector tokens; + try + { + // Coverity fix: boost::split() may throw an exception of type boost::bad_function_call. + boost::split(tokens, line, boost::algorithm::is_any_of(chars), boost::token_compress_on); + } + catch (const std::exception& e) + { + BOOST_LOG_TRIVIAL(error) << "An error occurred when splitting tokens: " << e.what(); + continue; + } + for (const std::string& token : tokens) + { + if (!token.empty()) // See https://stackoverflow.com/questions/10437406/ + { + try + { + result.push_back(parseElementFunc(token)); + } + catch (const std::exception&) + { + BOOST_LOG_TRIVIAL(error) << "'" << token << "' is not a valid number. It has been ignored."; + } + } + } + } + + return result; +} + +bool CheckOption(const po::variables_map& vm, + const char* option) +{ + // Check that the given option is valid. + if (option == nullptr) + { + return false; + } + + // Check whether 'option' is provided. + return vm.find(option) != vm.end(); +} + +void CheckOptionDependency(const po::variables_map& vm, + const char* option, + const char* required) +{ + // Check that the given options are valid. + if (option == nullptr || required == nullptr) + { + throw po::error("Invalid option to check dependency for"); + } + + // Check that if 'option' is provided, 'required' is also provided. 
+ if (CheckOption(vm, option) && !vm[option].defaulted())
+ {
+ if (CheckOption(vm, required) == 0 || vm[required].defaulted())
+ {
+ throw po::error(std::string("Option '") + option + "' requires option '" + required + "'.");
+ }
+ }
+}
+
+void CheckOptionDependencies(const po::variables_map& vm)
+{
+ CheckOptionDependency(vm, "model-path", "model-format");
+ CheckOptionDependency(vm, "model-path", "input-name");
+ CheckOptionDependency(vm, "model-path", "input-tensor-data");
+ CheckOptionDependency(vm, "model-path", "output-name");
+ CheckOptionDependency(vm, "input-tensor-shape", "model-path");
+}
+
+template<armnn::DataType NonQuantizedType>
+auto ParseDataArray(std::istream & stream);
+
+template<armnn::DataType QuantizedType>
+auto ParseDataArray(std::istream& stream,
+ const float& quantizationScale,
+ const int32_t& quantizationOffset);
+
+template<>
+auto ParseDataArray<armnn::DataType::Float32>(std::istream & stream)
+{
+ return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); });
+}
+
+template<>
+auto ParseDataArray<armnn::DataType::Signed32>(std::istream & stream)
+{
+ return ParseArrayImpl<int>(stream, [](const std::string & s) { return std::stoi(s); });
+}
+
+template<>
+auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream,
+ const float& quantizationScale,
+ const int32_t& quantizationOffset)
+{
+ return ParseArrayImpl<uint8_t>(stream,
+ [&quantizationScale, &quantizationOffset](const std::string & s)
+ {
+ return boost::numeric_cast<uint8_t>(
+ armnn::Quantize<uint8_t>(std::stof(s),
+ quantizationScale,
+ quantizationOffset));
+ });
+}
+
+std::vector<unsigned int> ParseArray(std::istream& stream)
+{
+ return ParseArrayImpl<unsigned int>(stream,
+ [](const std::string& s) { return boost::numeric_cast<unsigned int>(std::stoi(s)); });
+}
+
+std::vector<std::string> ParseStringList(const std::string & inputString, const char * delimiter)
+{
+ std::stringstream stream(inputString);
+ return ParseArrayImpl<std::string>(stream, [](const std::string& s) { return boost::trim_copy(s); }, delimiter);
+}
+
+void RemoveDuplicateDevices(std::vector<armnn::Compute>& computeDevices)
+{
+ // Mark the duplicate devices as 'Undefined'.
+ for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
+ {
+ for (auto j = std::next(i); j != computeDevices.end(); ++j)
+ {
+ if (*j == *i)
+ {
+ *j = armnn::Compute::Undefined;
+ }
+ }
+ }
+
+ // Remove 'Undefined' devices.
+ computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined), + computeDevices.end()); +} + +struct TensorPrinter : public boost::static_visitor<> +{ + TensorPrinter(const std::string& binding, const armnn::TensorInfo& info) + : m_OutputBinding(binding) + , m_Scale(info.GetQuantizationScale()) + , m_Offset(info.GetQuantizationOffset()) + {} + + void operator()(const std::vector& values) + { + ForEachValue(values, [](float value){ + printf("%f ", value); + }); + } + + void operator()(const std::vector& values) + { + auto& scale = m_Scale; + auto& offset = m_Offset; + ForEachValue(values, [&scale, &offset](uint8_t value) + { + printf("%f ", armnn::Dequantize(value, scale, offset)); + }); + } + + void operator()(const std::vector& values) + { + ForEachValue(values, [](int value) + { + printf("%d ", value); + }); + } + +private: + template + void ForEachValue(const Container& c, Delegate delegate) + { + std::cout << m_OutputBinding << ": "; + for (const auto& value : c) + { + delegate(value); + } + printf("\n"); + } + + std::string m_OutputBinding; + float m_Scale=0.0f; + int m_Offset=0; +}; + + +} // namespace + +template +int MainImpl(const char* modelPath, + bool isModelBinary, + const std::vector& computeDevices, + const std::vector& inputNames, + const std::vector>& inputTensorShapes, + const std::vector& inputTensorDataFilePaths, + const std::vector& inputTypes, + const std::vector& outputTypes, + const std::vector& outputNames, + bool enableProfiling, + bool enableFp16TurboMode, + const double& thresholdTime, + const size_t subgraphId, + const std::shared_ptr& runtime = nullptr) +{ + using TContainer = boost::variant, std::vector, std::vector>; + + std::vector inputDataContainers; + + try + { + // Creates an InferenceModel, which will parse the model and load it into an IRuntime. + typename InferenceModel::Params params; + params.m_ModelPath = modelPath; + params.m_IsModelBinary = isModelBinary; + params.m_ComputeDevices = computeDevices; + + for(const std::string& inputName: inputNames) + { + params.m_InputBindings.push_back(inputName); + } + + for(unsigned int i = 0; i < inputTensorShapes.size(); ++i) + { + params.m_InputShapes.push_back(*inputTensorShapes[i]); + } + + for(const std::string& outputName: outputNames) + { + params.m_OutputBindings.push_back(outputName); + } + + params.m_SubgraphId = subgraphId; + params.m_EnableFp16TurboMode = enableFp16TurboMode; + InferenceModel model(params, enableProfiling, runtime); + + for(unsigned int i = 0; i < inputTensorDataFilePaths.size(); ++i) + { + std::ifstream inputTensorFile(inputTensorDataFilePaths[i]); + + if (inputTypes[i].compare("float") == 0) + { + inputDataContainers.push_back( + ParseDataArray(inputTensorFile)); + } + else if (inputTypes[i].compare("int") == 0) + { + inputDataContainers.push_back( + ParseDataArray(inputTensorFile)); + } + else if (inputTypes[i].compare("qasymm8") == 0) + { + auto inputBinding = model.GetInputBindingInfo(); + inputDataContainers.push_back( + ParseDataArray(inputTensorFile, + inputBinding.second.GetQuantizationScale(), + inputBinding.second.GetQuantizationOffset())); + } + else + { + BOOST_LOG_TRIVIAL(fatal) << "Unsupported tensor data type \"" << inputTypes[i] << "\". 
"; + return EXIT_FAILURE; + } + + inputTensorFile.close(); + } + + const size_t numOutputs = params.m_OutputBindings.size(); + std::vector outputDataContainers; + + for (unsigned int i = 0; i < numOutputs; ++i) + { + if (outputTypes[i].compare("float") == 0) + { + outputDataContainers.push_back(std::vector(model.GetOutputSize(i))); + } + else if (outputTypes[i].compare("int") == 0) + { + outputDataContainers.push_back(std::vector(model.GetOutputSize(i))); + } + else if (outputTypes[i].compare("qasymm8") == 0) + { + outputDataContainers.push_back(std::vector(model.GetOutputSize(i))); + } + else + { + BOOST_LOG_TRIVIAL(fatal) << "Unsupported tensor data type \"" << outputTypes[i] << "\". "; + return EXIT_FAILURE; + } + } + + // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds) + auto inference_duration = model.Run(inputDataContainers, outputDataContainers); + + // Print output tensors + const auto& infosOut = model.GetOutputBindingInfos(); + for (size_t i = 0; i < numOutputs; i++) + { + const armnn::TensorInfo& infoOut = infosOut[i].second; + TensorPrinter printer(params.m_OutputBindings[i], infoOut); + boost::apply_visitor(printer, outputDataContainers[i]); + } + + BOOST_LOG_TRIVIAL(info) << "\nInference time: " << std::setprecision(2) + << std::fixed << inference_duration.count() << " ms"; + + // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line + if (thresholdTime != 0.0) + { + BOOST_LOG_TRIVIAL(info) << "Threshold time: " << std::setprecision(2) + << std::fixed << thresholdTime << " ms"; + auto thresholdMinusInference = thresholdTime - inference_duration.count(); + BOOST_LOG_TRIVIAL(info) << "Threshold time - Inference time: " << std::setprecision(2) + << std::fixed << thresholdMinusInference << " ms" << "\n"; + + if (thresholdMinusInference < 0) + { + BOOST_LOG_TRIVIAL(fatal) << "Elapsed inference time is greater than provided threshold time.\n"; + return EXIT_FAILURE; + } + } + + + } + catch (armnn::Exception const& e) + { + BOOST_LOG_TRIVIAL(fatal) << "Armnn Error: " << e.what(); + return EXIT_FAILURE; + } + + return EXIT_SUCCESS; +} + +// This will run a test +int RunTest(const std::string& format, + const std::string& inputTensorShapesStr, + const vector& computeDevice, + const std::string& path, + const std::string& inputNames, + const std::string& inputTensorDataFilePaths, + const std::string& inputTypes, + const std::string& outputTypes, + const std::string& outputNames, + bool enableProfiling, + bool enableFp16TurboMode, + const double& thresholdTime, + const size_t subgraphId, + const std::shared_ptr& runtime = nullptr) +{ + std::string modelFormat = boost::trim_copy(format); + std::string modelPath = boost::trim_copy(path); + std::vector inputNamesVector = ParseStringList(inputNames, ","); + std::vector inputTensorShapesVector = ParseStringList(inputTensorShapesStr, ";"); + std::vector inputTensorDataFilePathsVector = ParseStringList( + inputTensorDataFilePaths, ","); + std::vector outputNamesVector = ParseStringList(outputNames, ","); + std::vector inputTypesVector = ParseStringList(inputTypes, ","); + std::vector outputTypesVector = ParseStringList(outputTypes, ","); + + // Parse model binary flag from the model-format string we got from the command-line + bool isModelBinary; + if (modelFormat.find("bin") != std::string::npos) + { + isModelBinary = true; + } + else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos) + { + isModelBinary = false; + } + else + { + 
BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << "'. Please include 'binary' or 'text'";
+ return EXIT_FAILURE;
+ }
+
+ if ((inputTensorShapesVector.size() != 0) && (inputTensorShapesVector.size() != inputNamesVector.size()))
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "input-name and input-tensor-shape must have the same amount of elements.";
+ return EXIT_FAILURE;
+ }
+
+ if ((inputTensorDataFilePathsVector.size() != 0) &&
+ (inputTensorDataFilePathsVector.size() != inputNamesVector.size()))
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "input-name and input-tensor-data must have the same amount of elements.";
+ return EXIT_FAILURE;
+ }
+
+ if (inputTypesVector.size() == 0)
+ {
+ //Defaults the value of all inputs to "float"
+ inputTypesVector.assign(inputNamesVector.size(), "float");
+ }
+ if (outputTypesVector.size() == 0)
+ {
+ //Defaults the value of all outputs to "float"
+ outputTypesVector.assign(outputNamesVector.size(), "float");
+ }
+ else if ((inputTypesVector.size() != 0) && (inputTypesVector.size() != inputNamesVector.size()))
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "input-name and input-type must have the same amount of elements.";
+ return EXIT_FAILURE;
+ }
+
+ // Parse input tensor shape from the string we got from the command-line.
+ std::vector<std::unique_ptr<armnn::TensorShape>> inputTensorShapes;
+
+ if (!inputTensorShapesVector.empty())
+ {
+ inputTensorShapes.reserve(inputTensorShapesVector.size());
+
+ for(const std::string& shape : inputTensorShapesVector)
+ {
+ std::stringstream ss(shape);
+ std::vector<unsigned int> dims = ParseArray(ss);
+
+ try
+ {
+ // Coverity fix: An exception of type armnn::InvalidArgumentException is thrown and never caught.
+ inputTensorShapes.push_back(std::make_unique<armnn::TensorShape>(dims.size(), dims.data()));
+ }
+ catch (const armnn::InvalidArgumentException& e)
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "Cannot create tensor shape: " << e.what();
+ return EXIT_FAILURE;
+ }
+ }
+ }
+
+ // Check that threshold time is not less than zero
+ if (thresholdTime < 0)
+ {
+ BOOST_LOG_TRIVIAL(fatal) << "Threshold time supplied as a command line argument is less than zero.";
+ return EXIT_FAILURE;
+ }
+
+ // Forward to implementation based on the parser type
+ if (modelFormat.find("armnn") != std::string::npos)
+ {
+#if defined(ARMNN_SERIALIZER)
+ return MainImpl<armnnDeserializer::IDeserializer, float>(
+ modelPath.c_str(), isModelBinary, computeDevice,
+ inputNamesVector, inputTensorShapes,
+ inputTensorDataFilePathsVector, inputTypesVector,
+ outputTypesVector, outputNamesVector, enableProfiling,
+ enableFp16TurboMode, thresholdTime, subgraphId, runtime);
+#else
+ BOOST_LOG_TRIVIAL(fatal) << "Not built with serialization support.";
+ return EXIT_FAILURE;
+#endif
+ }
+ else if (modelFormat.find("caffe") != std::string::npos)
+ {
+#if defined(ARMNN_CAFFE_PARSER)
+ return MainImpl<armnnCaffeParser::ICaffeParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+ inputNamesVector, inputTensorShapes,
+ inputTensorDataFilePathsVector, inputTypesVector,
+ outputTypesVector, outputNamesVector, enableProfiling,
+ enableFp16TurboMode, thresholdTime, subgraphId, runtime);
+#else
+ BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support.";
+ return EXIT_FAILURE;
+#endif
+ }
+ else if (modelFormat.find("onnx") != std::string::npos)
+{
+#if defined(ARMNN_ONNX_PARSER)
+ return MainImpl<armnnOnnxParser::IOnnxParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+ inputNamesVector, inputTensorShapes,
+ inputTensorDataFilePathsVector, inputTypesVector,
+ outputTypesVector, outputNamesVector, enableProfiling,
+ enableFp16TurboMode, thresholdTime, subgraphId, runtime);
+#else
+ BOOST_LOG_TRIVIAL(fatal) << "Not built with 
Onnx parser support."; + return EXIT_FAILURE; +#endif + } + else if (modelFormat.find("tensorflow") != std::string::npos) + { +#if defined(ARMNN_TF_PARSER) + return MainImpl(modelPath.c_str(), isModelBinary, computeDevice, + inputNamesVector, inputTensorShapes, + inputTensorDataFilePathsVector, inputTypesVector, + outputTypesVector, outputNamesVector, enableProfiling, + enableFp16TurboMode, thresholdTime, subgraphId, runtime); +#else + BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support."; + return EXIT_FAILURE; +#endif + } + else if(modelFormat.find("tflite") != std::string::npos) + { +#if defined(ARMNN_TF_LITE_PARSER) + if (! isModelBinary) + { + BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << "'. Only 'binary' format supported \ + for tflite files"; + return EXIT_FAILURE; + } + return MainImpl(modelPath.c_str(), isModelBinary, computeDevice, + inputNamesVector, inputTensorShapes, + inputTensorDataFilePathsVector, inputTypesVector, + outputTypesVector, outputNamesVector, enableProfiling, + enableFp16TurboMode, thresholdTime, subgraphId, + runtime); +#else + BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << + "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'"; + return EXIT_FAILURE; +#endif + } + else + { + BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << + "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'"; + return EXIT_FAILURE; + } +} + +int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr& runtime, + const bool enableProfiling, const bool enableFp16TurboMode, const double& thresholdTime) +{ + std::string modelFormat; + std::string modelPath; + std::string inputNames; + std::string inputTensorShapes; + std::string inputTensorDataFilePaths; + std::string outputNames; + std::string inputTypes; + std::string outputTypes; + + size_t subgraphId = 0; + + const std::string backendsMessage = std::string("The preferred order of devices to run layers on by default. ") + + std::string("Possible choices: ") + + armnn::BackendRegistryInstance().GetBackendIdsAsString(); + + po::options_description desc("Options"); + try + { + desc.add_options() + ("model-format,f", po::value(&modelFormat), + "armnn-binary, caffe-binary, caffe-text, tflite-binary, onnx-binary, onnx-text, tensorflow-binary or " + "tensorflow-text.") + ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .armnn, .caffemodel, .prototxt, " + ".tflite, .onnx") + ("compute,c", po::value>()->multitoken(), + backendsMessage.c_str()) + ("input-name,i", po::value(&inputNames), "Identifier of the input tensors in the network separated by comma.") + ("subgraph-number,n", po::value(&subgraphId)->default_value(0), "Id of the subgraph to be " + "executed. Defaults to 0.") + ("input-tensor-shape,s", po::value(&inputTensorShapes), + "The shape of the input tensors in the network as a flat array of integers separated by comma. " + "Several shapes can be passed separating them by semicolon. " + "This parameter is optional, depending on the network.") + ("input-tensor-data,d", po::value(&inputTensorDataFilePaths), + "Path to files containing the input data as a flat array separated by whitespace. " + "Several paths can be passed separating them by comma.") + ("input-type,y",po::value(&inputTypes), "The type of the input tensors in the network separated by comma. " + "If unset, defaults to \"float\" for all defined inputs. 
" + "Accepted values (float, int or qasymm8).") + ("output-type,z",po::value(&outputTypes), "The type of the output tensors in the network separated by comma. " + "If unset, defaults to \"float\" for all defined outputs. " + "Accepted values (float, int or qasymm8).") + ("output-name,o", po::value(&outputNames), + "Identifier of the output tensors in the network separated by comma."); + } + catch (const std::exception& e) + { + // Coverity points out that default_value(...) can throw a bad_lexical_cast, + // and that desc.add_options() can throw boost::io::too_few_args. + // They really won't in any of these cases. + BOOST_ASSERT_MSG(false, "Caught unexpected exception"); + BOOST_LOG_TRIVIAL(fatal) << "Fatal internal error: " << e.what(); + return EXIT_FAILURE; + } + + std::vector clOptions; + clOptions.reserve(csvRow.values.size()); + for (const std::string& value : csvRow.values) + { + clOptions.push_back(value.c_str()); + } + + po::variables_map vm; + try + { + po::store(po::parse_command_line(static_cast(clOptions.size()), clOptions.data(), desc), vm); + + po::notify(vm); + + CheckOptionDependencies(vm); + } + catch (const po::error& e) + { + std::cerr << e.what() << std::endl << std::endl; + std::cerr << desc << std::endl; + return EXIT_FAILURE; + } + + // Get the preferred order of compute devices. + std::vector computeDevices = vm["compute"].as>(); + + // Remove duplicates from the list of compute devices. + RemoveDuplicateDevices(computeDevices); + + // Check that the specified compute devices are valid. + std::string invalidBackends; + if (!CheckRequestedBackendsAreValid(computeDevices, armnn::Optional(invalidBackends))) + { + BOOST_LOG_TRIVIAL(fatal) << "The list of preferred devices contains invalid backend IDs: " + << invalidBackends; + return EXIT_FAILURE; + } + + return RunTest(modelFormat, inputTensorShapes, computeDevices, modelPath, inputNames, + inputTensorDataFilePaths, inputTypes, outputTypes, outputNames, + enableProfiling, enableFp16TurboMode, thresholdTime, subgraphId); +} \ No newline at end of file -- cgit v1.2.1