From 610256fdfd9fc771b7213a9134c86c5988e77fdc Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat
Date: Wed, 26 Jun 2019 15:10:46 +0100
Subject: IVGCVSW-3193 Allow ExecuteNetwork to have qasymm8 input type and add
 option to quantize float inputs to qasymm8

Signed-off-by: Narumol Prangnawarat
Change-Id: I54b13b8b53c31c05658fe9c310ca5a66df759aa5
---
 tests/ExecuteNetwork/ExecuteNetwork.cpp            |  7 ++-
 .../NetworkExecutionUtils.hpp                      | 62 +++++++++++++++-------
 2 files changed, 50 insertions(+), 19 deletions(-)

diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index 60353dbf3f..a8f3b3d71d 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -66,6 +66,10 @@ int main(int argc, const char* argv[])
         ("input-type,y",po::value(&inputTypes), "The type of the input tensors in the network separated by comma. "
          "If unset, defaults to \"float\" for all defined inputs. "
          "Accepted values (float, int or qasymm8)")
+        ("quantize-input,q",po::bool_switch()->default_value(false),
+         "If this option is enabled, all float inputs will be quantized to qasymm8. "
+         "If unset, default to not quantized. "
+         "Accepted values (true or false)")
         ("output-type,z",po::value(&outputTypes),
          "The type of the output tensors in the network separated by comma. "
          "If unset, defaults to \"float\" for all defined outputs. "
@@ -119,6 +123,7 @@ int main(int argc, const char* argv[])
     bool concurrent = vm["concurrent"].as<bool>();
     bool enableProfiling = vm["event-based-profiling"].as<bool>();
     bool enableFp16TurboMode = vm["fp16-turbo-mode"].as<bool>();
+    bool quantizeInput = vm["quantize-input"].as<bool>();
 
     // Check whether we have to load test cases from a file.
     if (CheckOption(vm, "test-cases"))
@@ -220,7 +225,7 @@ int main(int argc, const char* argv[])
         }
 
         return RunTest(modelFormat, inputTensorShapes, computeDevices, modelPath, inputNames,
-                       inputTensorDataFilePaths, inputTypes, outputTypes, outputNames,
+                       inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
                        enableProfiling, enableFp16TurboMode, thresholdTime, subgraphId);
     }
 }
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 9d7e368dad..440dcf9aa8 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -145,6 +145,13 @@ auto ParseDataArray<armnn::DataType::Signed32>(std::istream & stream)
     return ParseArrayImpl<int>(stream, [](const std::string & s) { return std::stoi(s); });
 }
+template<>
+auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream)
+{
+    return ParseArrayImpl<uint8_t>(stream,
+                                   [](const std::string& s) { return boost::numeric_cast<uint8_t>(std::stoi(s)); });
+}
+
 template<>
 auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream,
                                                       const float& quantizationScale,
                                                       const int32_t& quantizationOffset)
@@ -159,7 +166,6 @@ auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream,
                                                               quantizationOffset));
                                    });
 }
-
 std::vector<unsigned int> ParseArray(std::istream& stream)
 {
     return ParseArrayImpl<unsigned int>(stream,
@@ -252,6 +258,7 @@ int MainImpl(const char* modelPath,
              const std::vector<std::unique_ptr<armnn::TensorShape>>& inputTensorShapes,
              const std::vector<std::string>& inputTensorDataFilePaths,
              const std::vector<std::string>& inputTypes,
+             bool quantizeInput,
              const std::vector<std::string>& outputTypes,
              const std::vector<std::string>& outputNames,
              bool enableProfiling,
@@ -297,8 +304,19 @@ int MainImpl(const char* modelPath,
 
         if (inputTypes[i].compare("float") == 0)
         {
-            inputDataContainers.push_back(
-                ParseDataArray<armnn::DataType::Float32>(inputTensorFile));
+            if (quantizeInput)
+            {
+                auto inputBinding = model.GetInputBindingInfo();
+                inputDataContainers.push_back(
+                    ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile,
+                                                                     inputBinding.second.GetQuantizationScale(),
+                                                                     inputBinding.second.GetQuantizationOffset()));
+            }
+            else
+            {
+                inputDataContainers.push_back(
+                    ParseDataArray<armnn::DataType::Float32>(inputTensorFile));
+            }
         }
         else if (inputTypes[i].compare("int") == 0)
         {
@@ -307,11 +325,8 @@
         }
         else if (inputTypes[i].compare("qasymm8") == 0)
         {
-            auto inputBinding = model.GetInputBindingInfo();
             inputDataContainers.push_back(
-                ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile,
-                                                                 inputBinding.second.GetQuantizationScale(),
-                                                                 inputBinding.second.GetQuantizationOffset()));
+                ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile));
         }
         else
         {
@@ -396,6 +411,7 @@ int RunTest(const std::string& format,
             const std::string& inputNames,
             const std::string& inputTensorDataFilePaths,
             const std::string& inputTypes,
+            bool quantizeInput,
             const std::string& outputTypes,
             const std::string& outputNames,
             bool enableProfiling,
@@ -498,7 +514,7 @@ int RunTest(const std::string& format,
         return MainImpl<armnnDeserializer::IDeserializer, float>(
             modelPath.c_str(), isModelBinary, computeDevice,
             inputNamesVector, inputTensorShapes,
-            inputTensorDataFilePathsVector, inputTypesVector,
+            inputTensorDataFilePathsVector, inputTypesVector, quantizeInput,
             outputTypesVector, outputNamesVector, enableProfiling, enableFp16TurboMode,
             thresholdTime, subgraphId, runtime);
 #else
@@ -512,8 +528,9 @@ int RunTest(const std::string& format,
         return MainImpl<armnnCaffeParser::ICaffeParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
                                                                inputNamesVector, inputTensorShapes,
                                                                inputTensorDataFilePathsVector, inputTypesVector,
-                                                               outputTypesVector, outputNamesVector, enableProfiling,
-                                                               enableFp16TurboMode, thresholdTime, subgraphId, runtime);
+                                                               quantizeInput, outputTypesVector, outputNamesVector,
+                                                               enableProfiling, enableFp16TurboMode, thresholdTime,
+                                                               subgraphId, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support.";
         return EXIT_FAILURE;
@@ -525,8 +542,9 @@ int RunTest(const std::string& format,
         return MainImpl<armnnOnnxParser::IOnnxParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
                                                              inputNamesVector, inputTensorShapes,
                                                              inputTensorDataFilePathsVector, inputTypesVector,
-                                                             outputTypesVector, outputNamesVector, enableProfiling,
-                                                             enableFp16TurboMode, thresholdTime, subgraphId, runtime);
+                                                             quantizeInput, outputTypesVector, outputNamesVector,
+                                                             enableProfiling, enableFp16TurboMode, thresholdTime,
+                                                             subgraphId, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Not built with Onnx parser support.";
         return EXIT_FAILURE;
@@ -538,8 +556,9 @@ int RunTest(const std::string& format,
         return MainImpl<armnnTfParser::ITfParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
                                                          inputNamesVector, inputTensorShapes,
                                                          inputTensorDataFilePathsVector, inputTypesVector,
-                                                         outputTypesVector, outputNamesVector, enableProfiling,
-                                                         enableFp16TurboMode, thresholdTime, subgraphId, runtime);
+                                                         quantizeInput, outputTypesVector, outputNamesVector,
+                                                         enableProfiling, enableFp16TurboMode, thresholdTime,
+                                                         subgraphId, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support.";
         return EXIT_FAILURE;
@@ -557,9 +576,9 @@ int RunTest(const std::string& format,
         return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
                                                                  inputNamesVector, inputTensorShapes,
                                                                  inputTensorDataFilePathsVector, inputTypesVector,
-                                                                 outputTypesVector, outputNamesVector, enableProfiling,
-                                                                 enableFp16TurboMode, thresholdTime, subgraphId,
-                                                                 runtime);
+                                                                 quantizeInput, outputTypesVector, outputNamesVector,
+                                                                 enableProfiling, enableFp16TurboMode, thresholdTime,
+                                                                 subgraphId, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat <<
                                     "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
@@ -616,6 +635,10 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
          "If unset, defaults to \"float\" for all defined inputs. "
          "Accepted values (float, int or qasymm8)")
+        ("quantize-input,q",po::bool_switch()->default_value(false),
+         "If this option is enabled, all float inputs will be quantized to qasymm8. "
+         "If unset, default to not quantized. "
+         "Accepted values (true or false)")
         ("output-type,z",po::value(&outputTypes),
          "The type of the output tensors in the network separated by comma. "
          "If unset, defaults to \"float\" for all defined outputs. "
          "Accepted values (float, int or qasymm8).")
@@ -655,6 +678,9 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
+    bool quantizeInput = vm["quantize-input"].as<bool>();
+
     // Get the preferred order of compute devices.
     std::vector<std::string> computeDevices = vm["compute"].as<std::vector<std::string>>();
@@ -671,6 +697,6 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr
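
Note on the quantization applied by --quantize-input (not part of the patch above): when the switch is set, each float value read from the input data file goes through ParseDataArray<armnn::DataType::QuantisedAsymm8>, which quantizes it via armnn::Quantize using the scale and offset reported by the network's input binding. The standalone C++ sketch below reproduces that asymmetric quantization step purely for illustration; the scale/offset values and the helper name are made up for the example, and ArmNN's own rounding and clamping inside armnn::Quantize remain the authoritative behaviour.

// Illustrative sketch only (not ArmNN code): asymmetric float -> qasymm8 quantization
// of the kind --quantize-input triggers for float inputs.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>

// Quantize one float to an unsigned 8-bit value using a scale and an offset (zero point).
uint8_t QuantizeToQAsymm8(float value, float scale, int32_t offset)
{
    int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
    q = std::min(255, std::max(0, q));   // clamp to the uint8 range
    return static_cast<uint8_t>(q);
}

int main()
{
    // Example values only; in ExecuteNetwork they come from
    // inputBinding.second.GetQuantizationScale() / GetQuantizationOffset().
    const float   scale  = 0.007843f;    // roughly 1/127.5
    const int32_t offset = 128;          // zero point

    const std::vector<float> inputs = { -1.0f, 0.0f, 0.5f, 1.0f };
    for (float f : inputs)
    {
        std::cout << f << " -> " << static_cast<int>(QuantizeToQAsymm8(f, scale, offset)) << "\n";
    }
    return 0;
}

With the patch applied, running the tool with --quantize-input (or -q) and float input files would feed the network qasymm8 data consistent with its input quantization parameters, which is the behaviour the sketch above approximates.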