From b3d481a25ee4e8b24f615627122ccb7a7a1028da Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=89anna=20=C3=93=20Cath=C3=A1in?=
Date: Tue, 26 Feb 2019 11:26:24 +0000
Subject: IVGCVSW-2629 Adding support for uint8 in ExecuteNetwork to fix
 issues with output tensor being all zero
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Change-Id: I4da1ffd684672fff0853fb053c3340d06a3bc165
Signed-off-by: Éanna Ó Catháin
---
 tests/ExecuteNetwork/ExecuteNetwork.cpp | 168 ++++++++++++++++++++++----------
 1 file changed, 115 insertions(+), 53 deletions(-)

diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index c040c9b1f8..8904d13d39 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -125,33 +125,51 @@ void CheckOptionDependencies(const po::variables_map& vm)
     CheckOptionDependency(vm, "input-tensor-shape", "model-path");
 }
 
-template<typename T>
-std::vector<T> ParseArray(std::istream& stream);
+template<armnn::DataType NonQuantizedType>
+auto ParseDataArray(std::istream & stream);
+
+template<armnn::DataType QuantizedType>
+auto ParseDataArray(std::istream& stream,
+                    const float& quantizationScale,
+                    const int32_t& quantizationOffset);
 
 template<>
-std::vector<float> ParseArray(std::istream& stream)
+auto ParseDataArray<armnn::DataType::Float32>(std::istream & stream)
 {
     return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); });
 }
 
 template<>
-std::vector<unsigned int> ParseArray(std::istream& stream)
+auto ParseDataArray<armnn::DataType::Signed32>(std::istream & stream)
 {
-    return ParseArrayImpl<unsigned int>(stream,
-        [](const std::string& s) { return boost::numeric_cast<unsigned int>(std::stoi(s)); });
+    return ParseArrayImpl<int>(stream, [](const std::string & s) { return std::stoi(s); });
 }
 
 template<>
-std::vector<int> ParseArray(std::istream& stream)
+auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream,
+                                                      const float& quantizationScale,
+                                                      const int32_t& quantizationOffset)
 {
-    return ParseArrayImpl<int>(stream, [](const std::string& s) { return std::stoi(s); });
+    return ParseArrayImpl<uint8_t>(stream,
+                                   [&quantizationScale, &quantizationOffset](const std::string & s)
+                                   {
+                                       return boost::numeric_cast<uint8_t>(
+                                           armnn::Quantize<uint8_t>(std::stof(s),
+                                                                    quantizationScale,
+                                                                    quantizationOffset));
+                                   });
 }
 
-std::vector<std::string> ParseInputString(const std::string& inputString, const char * chars)
+std::vector<unsigned int> ParseArray(std::istream& stream)
 {
-    std::stringstream stream(inputString);
+    return ParseArrayImpl<unsigned int>(stream,
+        [](const std::string& s) { return boost::numeric_cast<unsigned int>(std::stoi(s)); });
+}
 
-    return ParseArrayImpl<std::string>(stream, [](const std::string& s) { return boost::trim_copy(s); }, chars);
+std::vector<std::string> ParseStringList(const std::string & inputString, const char * delimiter)
+{
+    std::stringstream stream(inputString);
+    return ParseArrayImpl<std::string>(stream, [](const std::string& s) { return boost::trim_copy(s); }, delimiter);
 }
 
 void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
@@ -183,6 +201,7 @@ int MainImpl(const char* modelPath,
              const std::vector<std::unique_ptr<armnn::TensorShape>>& inputTensorShapes,
              const std::vector<std::string>& inputTensorDataFilePaths,
              const std::vector<std::string>& inputTypes,
+             const std::vector<std::string>& outputTypes,
              const std::vector<std::string>& outputNames,
              bool enableProfiling,
              const size_t subgraphId,
@@ -192,27 +211,6 @@ int MainImpl(const char* modelPath,
 
     std::vector<TContainer> inputDataContainers;
 
-    for(unsigned int i = 0; i < inputTensorDataFilePaths.size(); ++i)
-    {
-        std::ifstream inputTensorFile(inputTensorDataFilePaths[i]);
-
-        if (inputTypes[i].compare("float") == 0)
-        {
-            inputDataContainers.push_back(ParseArray<float>(inputTensorFile));
-        }
-        else if (inputTypes[i].compare("int") == 0)
-        {
-            inputDataContainers.push_back(ParseArray<int>(inputTensorFile));;
-        }
-        else
-        {
-            BOOST_LOG_TRIVIAL(fatal) << "Unsupported tensor data type \"" << inputTypes[i] << "\". ";
-            return EXIT_FAILURE;
-        }
-
-        inputTensorFile.close();
-    }
-
     try
     {
         // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
@@ -240,12 +238,59 @@ int MainImpl(const char* modelPath,
         params.m_SubgraphId = subgraphId;
         InferenceModel<TParser, TDataType> model(params, runtime);
 
+        for(unsigned int i = 0; i < inputTensorDataFilePaths.size(); ++i)
+        {
+            std::ifstream inputTensorFile(inputTensorDataFilePaths[i]);
+
+            if (inputTypes[i].compare("float") == 0)
+            {
+                inputDataContainers.push_back(
+                    ParseDataArray<armnn::DataType::Float32>(inputTensorFile));
+            }
+            else if (inputTypes[i].compare("int") == 0)
+            {
+                inputDataContainers.push_back(
+                    ParseDataArray<armnn::DataType::Signed32>(inputTensorFile));
+            }
+            else if (inputTypes[i].compare("qasymm8") == 0)
+            {
+                auto inputBinding = model.GetInputBindingInfo();
+                inputDataContainers.push_back(
+                    ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile,
+                                                                     inputBinding.second.GetQuantizationScale(),
+                                                                     inputBinding.second.GetQuantizationOffset()));
+            }
+            else
+            {
+                BOOST_LOG_TRIVIAL(fatal) << "Unsupported tensor data type \"" << inputTypes[i] << "\". ";
+                return EXIT_FAILURE;
+            }
+
+            inputTensorFile.close();
+        }
+
         const size_t numOutputs = params.m_OutputBindings.size();
         std::vector<TContainer> outputDataContainers;
 
         for (unsigned int i = 0; i < numOutputs; ++i)
         {
-            outputDataContainers.push_back(std::vector<TDataType>(model.GetOutputSize(i)));
+            if (outputTypes[i].compare("float") == 0)
+            {
+                outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
+            }
+            else if (outputTypes[i].compare("int") == 0)
+            {
+                outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
+            }
+            else if (outputTypes[i].compare("qasymm8") == 0)
+            {
+                outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
+            }
+            else
+            {
+                BOOST_LOG_TRIVIAL(fatal) << "Unsupported tensor data type \"" << outputTypes[i] << "\". ";
+                return EXIT_FAILURE;
+            }
         }
 
         model.Run(inputDataContainers, outputDataContainers);
@@ -282,6 +327,7 @@ int RunTest(const std::string& format,
             const std::string& inputNames,
             const std::string& inputTensorDataFilePaths,
             const std::string& inputTypes,
+            const std::string& outputTypes,
            const std::string& outputNames,
             bool enableProfiling,
             const size_t subgraphId,
@@ -289,11 +335,13 @@ int RunTest(const std::string& format,
 {
     std::string modelFormat = boost::trim_copy(format);
     std::string modelPath = boost::trim_copy(path);
-    std::vector<std::string> inputNamesVector = ParseInputString(inputNames, ",");
-    std::vector<std::string> inputTensorShapesVector = ParseInputString(inputTensorShapesStr, ";");
-    std::vector<std::string> inputTensorDataFilePathsVector = ParseInputString(inputTensorDataFilePaths, ",");
-    std::vector<std::string> outputNamesVector = ParseInputString(outputNames, ",");
-    std::vector<std::string> inputTypesVector = ParseInputString(inputTypes, ",");
+    std::vector<std::string> inputNamesVector = ParseStringList(inputNames, ",");
+    std::vector<std::string> inputTensorShapesVector = ParseStringList(inputTensorShapesStr, ";");
+    std::vector<std::string> inputTensorDataFilePathsVector = ParseStringList(
+        inputTensorDataFilePaths, ",");
+    std::vector<std::string> outputNamesVector = ParseStringList(outputNames, ",");
+    std::vector<std::string> inputTypesVector = ParseStringList(inputTypes, ",");
+    std::vector<std::string> outputTypesVector = ParseStringList(outputTypes, ",");
 
     // Parse model binary flag from the model-format string we got from the command-line
     bool isModelBinary;
@@ -327,10 +375,12 @@ int RunTest(const std::string& format,
     if (inputTypesVector.size() == 0)
     {
         //Defaults the value of all inputs to "float"
-        for(unsigned int i = 0; i < inputNamesVector.size(); ++i)
-        {
-            inputTypesVector.push_back("float");
-        }
+        inputTypesVector.assign(inputNamesVector.size(), "float");
+    }
+    if (outputTypesVector.size() == 0)
+    {
+        //Defaults the value of all outputs to "float"
+        outputTypesVector.assign(outputNamesVector.size(), "float");
     }
     else if ((inputTypesVector.size() != 0) && (inputTypesVector.size() != inputNamesVector.size()))
     {
@@ -348,7 +398,7 @@ int RunTest(const std::string& format,
         for(const std::string& shape : inputTensorShapesVector)
         {
             std::stringstream ss(shape);
-            std::vector<unsigned int> dims = ParseArray<unsigned int>(ss);
+            std::vector<unsigned int> dims = ParseArray(ss);
 
             try
             {
@@ -370,7 +420,7 @@ int RunTest(const std::string& format,
         return MainImpl<armnnDeserializer::IDeserializer, float>(
             modelPath.c_str(), isModelBinary, computeDevice,
             inputNamesVector, inputTensorShapes,
-            inputTensorDataFilePathsVector, inputTypesVector,
+            inputTensorDataFilePathsVector, inputTypesVector, outputTypesVector,
             outputNamesVector, enableProfiling, subgraphId, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Not built with serialization support.";
@@ -383,7 +433,8 @@ int RunTest(const std::string& format,
         return MainImpl<armnnCaffeParser::ICaffeParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
                                                                inputNamesVector, inputTensorShapes,
                                                                inputTensorDataFilePathsVector, inputTypesVector,
-                                                               outputNamesVector, enableProfiling, subgraphId, runtime);
+                                                               outputTypesVector, outputNamesVector, enableProfiling,
+                                                               subgraphId, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support.";
         return EXIT_FAILURE;
@@ -395,7 +446,8 @@ int RunTest(const std::string& format,
         return MainImpl<armnnOnnxParser::IOnnxParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
                                                              inputNamesVector, inputTensorShapes,
                                                              inputTensorDataFilePathsVector, inputTypesVector,
-                                                             outputNamesVector, enableProfiling, subgraphId, runtime);
+                                                             outputTypesVector, outputNamesVector, enableProfiling,
+                                                             subgraphId, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Not built with Onnx parser support.";
         return EXIT_FAILURE;
@@ -407,7 +459,8 @@ int RunTest(const std::string& format,
         return MainImpl<armnnTfParser::ITfParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
                                                          inputNamesVector, inputTensorShapes,
                                                          inputTensorDataFilePathsVector, inputTypesVector,
-                                                         outputNamesVector, enableProfiling, subgraphId, runtime);
+                                                         outputTypesVector, outputNamesVector, enableProfiling,
+                                                         subgraphId, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support.";
         return EXIT_FAILURE;
@@ -425,8 +478,8 @@ int RunTest(const std::string& format,
         return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
                                                                  inputNamesVector, inputTensorShapes,
                                                                  inputTensorDataFilePathsVector, inputTypesVector,
-                                                                 outputNamesVector, enableProfiling, subgraphId,
-                                                                 runtime);
+                                                                 outputTypesVector, outputNamesVector, enableProfiling,
+                                                                 subgraphId, runtime);
 #else
         BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat <<
                                     "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
@@ -451,6 +504,7 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow,
     std::string inputTensorDataFilePaths;
     std::string outputNames;
     std::string inputTypes;
+    std::string outputTypes;
 
     size_t subgraphId = 0;
 
@@ -481,7 +535,10 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow,
          "Several paths can be passed separating them by comma.")
         ("input-type,y",po::value<std::string>(&inputTypes), "The type of the input tensors in the network separated by comma. "
          "If unset, defaults to \"float\" for all defined inputs. "
-         "Accepted values (float or int).")
+         "Accepted values (float, int or qasymm8).")
+        ("output-type,z",po::value<std::string>(&outputTypes), "The type of the output tensors in the network separated by comma. "
+         "If unset, defaults to \"float\" for all defined outputs. "
+         "Accepted values (float, int or qasymm8).")
         ("output-name,o", po::value<std::string>(&outputNames),
          "Identifier of the output tensors in the network separated by comma.");
 }
@@ -534,7 +591,7 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow,
     }
 
     return RunTest(modelFormat, inputTensorShapes, computeDevices, modelPath, inputNames, inputTensorDataFilePaths,
-                   inputTypes, outputNames, enableProfiling, subgraphId);
+                   inputTypes, outputTypes, outputNames, enableProfiling, subgraphId);
 }
 
 int main(int argc, const char* argv[])
@@ -557,6 +614,7 @@ int main(int argc, const char* argv[])
     std::string inputTensorDataFilePaths;
     std::string outputNames;
     std::string inputTypes;
+    std::string outputTypes;
 
     size_t subgraphId = 0;
 
@@ -593,7 +651,11 @@ int main(int argc, const char* argv[])
             "Several paths can be passed separating them by comma. ")
            ("input-type,y",po::value<std::string>(&inputTypes), "The type of the input tensors in the network separated by comma. "
             "If unset, defaults to \"float\" for all defined inputs. "
-            "Accepted values (float or int)")
+            "Accepted values (float, int or qasymm8)")
+           ("output-type,z",po::value<std::string>(&outputTypes),
+            "The type of the output tensors in the network separated by comma. "
+            "If unset, defaults to \"float\" for all defined outputs. "
+            "Accepted values (float, int or qasymm8).")
            ("output-name,o", po::value<std::string>(&outputNames),
             "Identifier of the output tensors in the network separated by comma.")
            ("event-based-profiling,e", po::bool_switch()->default_value(false),
@@ -737,6 +799,6 @@ int main(int argc, const char* argv[])
     }
 
     return RunTest(modelFormat, inputTensorShapes, computeDevices, modelPath, inputNames, inputTensorDataFilePaths,
-                   inputTypes, outputNames, enableProfiling, subgraphId);
+                   inputTypes, outputTypes, outputNames, enableProfiling, subgraphId);
     }
 }
-- 
cgit v1.2.1
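The qasymm8 path in the patch relies on armnn::Quantize to map each textual float value onto the uint8 scale and offset reported by the input binding. The standalone sketch below mirrors that affine quantization step so the mapping can be checked in isolation; the helper name and the scale/offset values are illustrative only and are not part of the patch.

    // Illustrative sketch of the float -> uint8 mapping used by the
    // ParseDataArray<armnn::DataType::QuantisedAsymm8> specialisation.
    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>

    // Affine quantization: q = clamp(round(value / scale) + offset, 0, 255).
    uint8_t QuantizeToUint8(float value, float scale, int32_t offset)
    {
        const int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
        return static_cast<uint8_t>(std::min(255, std::max(0, q))); // saturate to uint8 range
    }

    int main()
    {
        const float   scale  = 0.007874f; // made-up example values
        const int32_t offset = 128;
        for (float v : {-1.0f, 0.0f, 0.5f, 1.0f})
        {
            std::cout << v << " -> " << int{QuantizeToUint8(v, scale, offset)} << "\n";
        }
    }

Pre-quantizing the inputs this way, and sizing the output buffers as std::vector<uint8_t> rather than a float container, is what lets a fully quantized model run end to end instead of producing the all-zero outputs mentioned in the commit subject.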
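For reference, a run exercising the new options might look like the following. The model path and data file are placeholders; the input-type, output-type, and output-name option names are taken from the patch, and the remaining long flags follow ExecuteNetwork's existing options.

    ExecuteNetwork --model-format tflite-binary \
                   --model-path model.tflite \
                   --compute CpuAcc \
                   --input-name input --input-tensor-data input.txt \
                   --input-type qasymm8 \
                   --output-name output --output-type qasymm8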