From c82c8732fb514b412012002bd951a84039eca696 Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Thu, 24 Oct 2019 17:07:43 +0100
Subject: IVGCVSW-4008 Add profiling mode to ExecuteNetwork

* Removed the requirement for specifying a data file for each input tensor
* Added the possibility to generate dummy tensor data (filled with 0s) if no
  data files are specified by the user
* Warn the user when they request to save the output to a file but the input
  was generated, since that renders the output useless

Signed-off-by: Aron Virginas-Tar
Change-Id: I8baed116dcd99fe380e419db322dc7e04ab1c653
---
 .../NetworkExecutionUtils.hpp | 331 +++++++++++++--------
 1 file changed, 202 insertions(+), 129 deletions(-)

(limited to 'tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp')

diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 972939bb98..004e9fbdb2 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -25,6 +25,7 @@
 #include 
 #include 
+#include <armnn/Optional.hpp>
 #include 
 #include 
@@ -120,7 +121,6 @@ void CheckOptionDependencies(const po::variables_map& vm)
 {
     CheckOptionDependency(vm, "model-path", "model-format");
     CheckOptionDependency(vm, "model-path", "input-name");
-    CheckOptionDependency(vm, "model-path", "input-tensor-data");
     CheckOptionDependency(vm, "model-path", "output-name");
     CheckOptionDependency(vm, "input-tensor-shape", "model-path");
 }
@@ -277,27 +277,104 @@ private:
 };

-} // namespace
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+std::vector<T> GenerateDummyTensorData(unsigned int numElements)
+{
+    return std::vector<T>(numElements, static_cast<T>(0));
+}
+
+using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+using QuantizationParams = std::pair<float, int32_t>;
+
+void PopulateTensorWithData(TContainer& tensorData,
+                            unsigned int numElements,
+                            const std::string& dataTypeStr,
+                            const armnn::Optional<QuantizationParams>& qParams,
+                            const armnn::Optional<std::string>& dataFile)
+{
+    const bool readFromFile = dataFile.has_value() && !dataFile.value().empty();
+    const bool quantizeData = qParams.has_value();
+
+    std::ifstream inputTensorFile;
+    if (readFromFile)
+    {
+        inputTensorFile = std::ifstream(dataFile.value());
+    }
+
+    if (dataTypeStr.compare("float") == 0)
+    {
+        if (quantizeData)
+        {
+            const float qScale  = qParams.value().first;
+            const int   qOffset = qParams.value().second;
+
+            tensorData = readFromFile ?
+                ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile, qScale, qOffset) :
+                GenerateDummyTensorData<armnn::DataType::QuantisedAsymm8>(numElements);
+        }
+        else
+        {
+            tensorData = readFromFile ?
+                ParseDataArray<armnn::DataType::Float32>(inputTensorFile) :
+                GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
+        }
+    }
+    else if (dataTypeStr.compare("int") == 0)
+    {
+        tensorData = readFromFile ?
+            ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) :
+            GenerateDummyTensorData<armnn::DataType::Signed32>(numElements);
+    }
+    else if (dataTypeStr.compare("qasymm8") == 0)
+    {
+        tensorData = readFromFile ?
+            ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile) :
+            GenerateDummyTensorData<armnn::DataType::QuantisedAsymm8>(numElements);
+    }
+    else
+    {
+        std::string errorMessage = "Unsupported tensor data type " + dataTypeStr;
+        BOOST_LOG_TRIVIAL(fatal) << errorMessage;
+
+        inputTensorFile.close();
+        throw armnn::Exception(errorMessage);
+    }
+
+    inputTensorFile.close();
+}
+
+} // anonymous namespace
+
+bool generateTensorData = true;
+
+struct ExecuteNetworkParams
+{
+    using TensorShapePtr = std::unique_ptr<armnn::TensorShape>;
+
+    const char*                   m_ModelPath;
+    bool                          m_IsModelBinary;
+    std::vector<armnn::BackendId> m_ComputeDevices;
+    std::string                   m_DynamicBackendsPath;
+    std::vector<string>           m_InputNames;
+    std::vector<TensorShapePtr>   m_InputTensorShapes;
+    std::vector<string>           m_InputTensorDataFilePaths;
+    std::vector<string>           m_InputTypes;
+    bool                          m_QuantizeInput;
+    std::vector<string>           m_OutputTypes;
+    std::vector<string>           m_OutputNames;
+    std::vector<string>           m_OutputTensorFiles;
+    bool                          m_EnableProfiling;
+    bool                          m_EnableFp16TurboMode;
+    double                        m_ThresholdTime;
+    bool                          m_PrintIntermediate;
+    size_t                        m_SubgraphId;
+    bool                          m_EnableLayerDetails = false;
+    bool                          m_GenerateTensorData;
+};

 template<typename TParser, typename TDataType>
-int MainImpl(const char* modelPath,
-             bool isModelBinary,
-             const std::vector<armnn::BackendId>& computeDevices,
-             const std::string& dynamicBackendsPath,
-             const std::vector<string>& inputNames,
-             const std::vector<std::unique_ptr<armnn::TensorShape>>& inputTensorShapes,
-             const std::vector<string>& inputTensorDataFilePaths,
-             const std::vector<string>& inputTypes,
-             bool quantizeInput,
-             const std::vector<string>& outputTypes,
-             const std::vector<string>& outputNames,
-             const std::vector<string>& outputTensorFiles,
-             bool enableProfiling,
-             bool enableFp16TurboMode,
-             const double& thresholdTime,
-             bool printIntermediate,
-             const size_t subgraphId,
-             bool enableLayerDetails = false,
+int MainImpl(const ExecuteNetworkParams& params,
              const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
 {
     using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
@@ -307,92 +384,86 @@ int MainImpl(const char* modelPath,

     try
     {
         // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
-        typename InferenceModel<TParser, TDataType>::Params params;
-        params.m_ModelPath = modelPath;
-        params.m_IsModelBinary = isModelBinary;
-        params.m_ComputeDevices = computeDevices;
-        params.m_DynamicBackendsPath = dynamicBackendsPath;
-        params.m_PrintIntermediateLayers = printIntermediate;
-        params.m_VisualizePostOptimizationModel = enableLayerDetails;
-
-        for(const std::string& inputName: inputNames)
+        typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
+        inferenceModelParams.m_ModelPath = params.m_ModelPath;
+        inferenceModelParams.m_IsModelBinary = params.m_IsModelBinary;
+        inferenceModelParams.m_ComputeDevices = params.m_ComputeDevices;
+        inferenceModelParams.m_DynamicBackendsPath = params.m_DynamicBackendsPath;
+        inferenceModelParams.m_PrintIntermediateLayers = params.m_PrintIntermediate;
+        inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
+
+        for(const std::string& inputName: params.m_InputNames)
         {
-            params.m_InputBindings.push_back(inputName);
+            inferenceModelParams.m_InputBindings.push_back(inputName);
         }

-        for(unsigned int i = 0; i < inputTensorShapes.size(); ++i)
+        for(unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
         {
-            params.m_InputShapes.push_back(*inputTensorShapes[i]);
+            inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
         }

-        for(const std::string& outputName: outputNames)
+        for(const std::string& outputName: params.m_OutputNames)
         {
-            params.m_OutputBindings.push_back(outputName);
+            inferenceModelParams.m_OutputBindings.push_back(outputName);
         }

-        params.m_SubgraphId = subgraphId;
-        params.m_EnableFp16TurboMode = enableFp16TurboMode;
-        InferenceModel<TParser, TDataType> model(params, enableProfiling, dynamicBackendsPath, runtime);
+        inferenceModelParams.m_SubgraphId = params.m_SubgraphId;
+        inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
+
+        InferenceModel<TParser, TDataType> model(inferenceModelParams,
+                                                 params.m_EnableProfiling,
+                                                 params.m_DynamicBackendsPath,
+                                                 runtime);

-        for(unsigned int i = 0; i < inputTensorDataFilePaths.size(); ++i)
+        const size_t numInputs = inferenceModelParams.m_InputBindings.size();
+        for(unsigned int i = 0; i < numInputs; ++i)
         {
-            std::ifstream inputTensorFile(inputTensorDataFilePaths[i]);
+            armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
+                armnn::MakeOptional<QuantizationParams>(model.GetInputQuantizationParams()) :
+                armnn::EmptyOptional();

-            if (inputTypes[i].compare("float") == 0)
-            {
-                if (quantizeInput)
-                {
-                    auto inputBinding = model.GetInputBindingInfo();
-                    inputDataContainers.push_back(
-                        ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile,
-                                                                         inputBinding.second.GetQuantizationScale(),
-                                                                         inputBinding.second.GetQuantizationOffset()));
-                }
-                else
-                {
-                    inputDataContainers.push_back(
-                        ParseDataArray<armnn::DataType::Float32>(inputTensorFile));
-                }
-            }
-            else if (inputTypes[i].compare("int") == 0)
-            {
-                inputDataContainers.push_back(
-                    ParseDataArray<armnn::DataType::Signed32>(inputTensorFile));
-            }
-            else if (inputTypes[i].compare("qasymm8") == 0)
-            {
-                inputDataContainers.push_back(
-                    ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile));
-            }
-            else
+            armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
+                armnn::EmptyOptional() :
+                armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[i]);
+
+            unsigned int numElements = model.GetInputSize(i);
+            if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
             {
-                BOOST_LOG_TRIVIAL(fatal) << "Unsupported tensor data type \"" << inputTypes[i] << "\". ";
-                return EXIT_FAILURE;
+                // If the user has provided a tensor shape for the current input,
+                // override numElements
+                numElements = params.m_InputTensorShapes[i]->GetNumElements();
             }

-            inputTensorFile.close();
+            TContainer tensorData;
+            PopulateTensorWithData(tensorData,
+                                   numElements,
+                                   params.m_InputTypes[i],
+                                   qParams,
+                                   dataFile);
+
+            inputDataContainers.push_back(tensorData);
         }

-        const size_t numOutputs = params.m_OutputBindings.size();
+        const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();
         std::vector<TContainer> outputDataContainers;

         for (unsigned int i = 0; i < numOutputs; ++i)
         {
-            if (outputTypes[i].compare("float") == 0)
+            if (params.m_OutputTypes[i].compare("float") == 0)
             {
                 outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
             }
-            else if (outputTypes[i].compare("int") == 0)
+            else if (params.m_OutputTypes[i].compare("int") == 0)
             {
                 outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
             }
-            else if (outputTypes[i].compare("qasymm8") == 0)
+            else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
             {
                 outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
             }
             else
             {
-                BOOST_LOG_TRIVIAL(fatal) << "Unsupported tensor data type \"" << outputTypes[i] << "\". ";
+                BOOST_LOG_TRIVIAL(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\". ";
                 return EXIT_FAILURE;
             }
         }
@@ -400,25 +471,35 @@ int MainImpl(const char* modelPath,
         // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds)
         auto inference_duration = model.Run(inputDataContainers, outputDataContainers);

-        // Print output tensors
-        const auto& infosOut = model.GetOutputBindingInfos();
-        for (size_t i = 0; i < numOutputs; i++)
+        // Print output tensors (if requested)
+        if (!params.m_OutputTensorFiles.empty())
         {
-            const armnn::TensorInfo& infoOut = infosOut[i].second;
-            auto outputTensorFile = outputTensorFiles.empty() ? "" : outputTensorFiles[i];
-            TensorPrinter printer(params.m_OutputBindings[i], infoOut, outputTensorFile);
-            boost::apply_visitor(printer, outputDataContainers[i]);
+            if (params.m_GenerateTensorData)
+            {
+                BOOST_LOG_TRIVIAL(warning) << "Requested to write output to file, although the input was generated. "
+                                           << "Note that the output will not be useful.";
+            }
+
+            const auto& infosOut = model.GetOutputBindingInfos();
+            for (size_t i = 0; i < numOutputs; i++)
+            {
+                const armnn::TensorInfo& infoOut = infosOut[i].second;
+                auto outputTensorFile = params.m_OutputTensorFiles[i];
+
+                TensorPrinter printer(inferenceModelParams.m_OutputBindings[i], infoOut, outputTensorFile);
+                boost::apply_visitor(printer, outputDataContainers[i]);
+            }
         }

         BOOST_LOG_TRIVIAL(info) << "\nInference time: " << std::setprecision(2)
                                 << std::fixed << inference_duration.count() << " ms";

         // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
-        if (thresholdTime != 0.0)
+        if (params.m_ThresholdTime != 0.0)
         {
             BOOST_LOG_TRIVIAL(info) << "Threshold time: " << std::setprecision(2)
-                                    << std::fixed << thresholdTime << " ms";
-            auto thresholdMinusInference = thresholdTime - inference_duration.count();
+                                    << std::fixed << params.m_ThresholdTime << " ms";
+            auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
             BOOST_LOG_TRIVIAL(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                     << std::fixed << thresholdMinusInference << " ms" << "\n";

@@ -428,8 +509,6 @@ int MainImpl(const char* modelPath,
                 return EXIT_FAILURE;
             }
         }
-
-
     }
     catch (armnn::Exception const& e)
     {
@@ -443,7 +522,7 @@ int MainImpl(const char* modelPath,
 // This will run a test
 int RunTest(const std::string& format,
             const std::string& inputTensorShapesStr,
-            const vector<armnn::BackendId>& computeDevice,
+            const vector<armnn::BackendId>& computeDevices,
             const std::string& dynamicBackendsPath,
             const std::string& path,
             const std::string& inputNames,
@@ -558,20 +637,42 @@ int RunTest(const std::string& format,
     // Check that threshold time is not less than zero
     if (thresholdTime < 0)
     {
-        BOOST_LOG_TRIVIAL(fatal) << "Threshold time supplied as a commoand line argument is less than zero.";
+        BOOST_LOG_TRIVIAL(fatal) << "Threshold time supplied as a command line argument is less than zero.";
         return EXIT_FAILURE;
     }

+    ExecuteNetworkParams params;
+    params.m_ModelPath = modelPath.c_str();
+    params.m_IsModelBinary = isModelBinary;
+    params.m_ComputeDevices = computeDevices;
+    params.m_DynamicBackendsPath = dynamicBackendsPath;
+    params.m_InputNames = inputNamesVector;
+    params.m_InputTensorShapes = std::move(inputTensorShapes);
+    params.m_InputTensorDataFilePaths = inputTensorDataFilePathsVector;
+    params.m_InputTypes = inputTypesVector;
+    params.m_QuantizeInput = quantizeInput;
+    params.m_OutputTypes = outputTypesVector;
+    params.m_OutputNames = outputNamesVector;
+    params.m_OutputTensorFiles = outputTensorFilesVector;
+    params.m_EnableProfiling = enableProfiling;
+    params.m_EnableFp16TurboMode = enableFp16TurboMode;
+    params.m_ThresholdTime = thresholdTime;
+    params.m_PrintIntermediate = printIntermediate;
+    params.m_SubgraphId = subgraphId;
+    params.m_EnableLayerDetails = enableLayerDetails;
+    params.m_GenerateTensorData = inputTensorDataFilePathsVector.empty();
+
+    // Warn if ExecuteNetwork will generate dummy input data
+    if (params.m_GenerateTensorData)
+    {
+        BOOST_LOG_TRIVIAL(warning) << "No input files provided, input tensors will be filled with 0s.";
+    }
+
     // Forward to implementation based on the parser type
     if (modelFormat.find("armnn") != std::string::npos)
     {
 #if defined(ARMNN_SERIALIZER)
-        return MainImpl<armnnDeserializer::IDeserializer, float>(
-            modelPath.c_str(), isModelBinary, computeDevice,
-            dynamicBackendsPath, inputNamesVector, inputTensorShapes,
-            inputTensorDataFilePathsVector, inputTypesVector, quantizeInput,
-            outputTypesVector, outputNamesVector, outputTensorFilesVector, enableProfiling,
-            enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId, enableLayerDetails, runtime);
+        return MainImpl<armnnDeserializer::IDeserializer, float>(params, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Not built with serialization support.";
         return EXIT_FAILURE;
@@ -580,15 +681,7 @@ int RunTest(const std::string& format,
     else if (modelFormat.find("caffe") != std::string::npos)
     {
 #if defined(ARMNN_CAFFE_PARSER)
-        return MainImpl<armnnCaffeParser::ICaffeParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
-                                                               dynamicBackendsPath,
-                                                               inputNamesVector, inputTensorShapes,
-                                                               inputTensorDataFilePathsVector, inputTypesVector,
-                                                               quantizeInput, outputTypesVector, outputNamesVector,
-                                                               outputTensorFilesVector, enableProfiling,
-                                                               enableFp16TurboMode, thresholdTime,
-                                                               printIntermediate, subgraphId, enableLayerDetails,
-                                                               runtime);
+        return MainImpl<armnnCaffeParser::ICaffeParser, float>(params, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support.";
         return EXIT_FAILURE;
@@ -597,14 +690,7 @@ int RunTest(const std::string& format,
     else if (modelFormat.find("onnx") != std::string::npos)
     {
 #if defined(ARMNN_ONNX_PARSER)
-        return MainImpl<armnnOnnxParser::IOnnxParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
-                                                             dynamicBackendsPath,
-                                                             inputNamesVector, inputTensorShapes,
-                                                             inputTensorDataFilePathsVector, inputTypesVector,
-                                                             quantizeInput, outputTypesVector, outputNamesVector,
-                                                             outputTensorFilesVector, enableProfiling, enableFp16TurboMode,
-                                                             thresholdTime,printIntermediate, subgraphId,
-                                                             enableLayerDetails, runtime);
+        return MainImpl<armnnOnnxParser::IOnnxParser, float>(params, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Not built with Onnx parser support.";
         return EXIT_FAILURE;
@@ -613,14 +699,7 @@ int RunTest(const std::string& format,
     else if (modelFormat.find("tensorflow") != std::string::npos)
    {
 #if defined(ARMNN_TF_PARSER)
-        return MainImpl<armnnTfParser::ITfParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
-                                                         dynamicBackendsPath,
-                                                         inputNamesVector, inputTensorShapes,
-                                                         inputTensorDataFilePathsVector, inputTypesVector,
-                                                         quantizeInput, outputTypesVector, outputNamesVector,
-                                                         outputTensorFilesVector, enableProfiling, enableFp16TurboMode,
-                                                         thresholdTime,printIntermediate, subgraphId,
-                                                         enableLayerDetails, runtime);
+        return MainImpl<armnnTfParser::ITfParser, float>(params, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support.";
         return EXIT_FAILURE;
@@ -635,14 +714,7 @@ int RunTest(const std::string& format,
                                        for tflite files";
             return EXIT_FAILURE;
         }
-        return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
-                                                                 dynamicBackendsPath,
-                                                                 inputNamesVector, inputTensorShapes,
-                                                                 inputTensorDataFilePathsVector, inputTypesVector,
-                                                                 quantizeInput, outputTypesVector, outputNamesVector,
-                                                                 outputTensorFilesVector, enableProfiling,
-                                                                 enableFp16TurboMode, thresholdTime, printIntermediate,
-                                                                 subgraphId, enableLayerDetails, runtime);
+        return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(params, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat <<
                                     "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
@@ -699,9 +771,10 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IRuntime>& runtime,
         ("input-tensor-data,d", po::value(&inputTensorDataFilePaths)->default_value(""),
          "Path to files containing the input data as a flat array separated by whitespace. "
-         "Several paths can be passed separating them by comma.")
+         "Several paths can be passed separating them by comma. If not specified, the network will be run with dummy "
+         "data (useful for profiling).")
         ("input-type,y",po::value(&inputTypes),
          "The type of the input tensors in the network separated by comma. "
          "If unset, defaults to \"float\" for all defined inputs. "
         "Accepted values (float, int or qasymm8).")
--
cgit v1.2.1
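
In short, the patch replaces the old one-data-file-per-input requirement with a per-tensor fallback: if --input-tensor-data is given, the file is parsed as before; if it is omitted, the input is filled with zeros so the network can still be executed, which is all a profiling run needs. The stand-alone sketch below illustrates just that decision. It is not code from the patch; ReadFloatData and MakeDummyData are invented stand-ins for ParseDataArray and GenerateDummyTensorData, and only the float case is shown.

    // Sketch of the fallback behaviour added by this patch (illustrative only):
    // read whitespace-separated values from a data file if one was supplied,
    // otherwise fill the input with zeros so the network can still be run.
    #include <fstream>
    #include <iostream>
    #include <iterator>
    #include <string>
    #include <vector>

    // Stand-in for ParseDataArray: parse whitespace-separated floats from a file.
    std::vector<float> ReadFloatData(const std::string& path)
    {
        std::ifstream file(path);
        return std::vector<float>(std::istream_iterator<float>(file), std::istream_iterator<float>());
    }

    // Stand-in for GenerateDummyTensorData: zero-filled data of the requested size.
    std::vector<float> MakeDummyData(unsigned int numElements)
    {
        return std::vector<float>(numElements, 0.0f);
    }

    int main(int argc, char* argv[])
    {
        const unsigned int numElements = 8; // in ExecuteNetwork this comes from the input binding info
        const std::string dataFile = (argc > 1) ? argv[1] : "";

        // Same decision PopulateTensorWithData makes per input tensor.
        const std::vector<float> inputData =
            dataFile.empty() ? MakeDummyData(numElements) : ReadFloatData(dataFile);

        if (dataFile.empty())
        {
            std::cout << "No input file provided, input tensor filled with 0s\n";
        }
        std::cout << "Input elements: " << inputData.size() << "\n";
        return 0;
    }

Compile it with any C++11 compiler (for example, g++ -std=c++11 sketch.cpp) and run it with or without a file argument to see the two paths.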