From d7ed6d4e53a877a25fcea754d76c8831451f18f1 Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Wed, 21 Jul 2021 09:42:43 +0100
Subject: GitHub #557 wrong result in int8 model

 * Added support for qasymms8 (int8) to ImageTensorGenerator
 * Added qasymmu8 as an alias for qasymm8 in ImageTensorGenerator
 * Added support for qasymms8 (int8) to ExecuteNetwork
 * Added qasymmu8 as an alias for qasymm8 in ExecuteNetwork
 * Set tflite to be the default model format in ImageTensorGenerator,
   as it is the only supported model format.

Signed-off-by: Mike Kelly
Change-Id: Ieda7b78e668ea390e3565cd65a41fe0a9c8a5b83
---
 tests/ExecuteNetwork/ExecuteNetwork.cpp | 36 ++++++++++++++++++++++++++++-----
 1 file changed, 31 insertions(+), 5 deletions(-)

diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index bce83583cc..a9b5a3c3f4 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -155,7 +155,8 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
 
             std::copy(tensorData.begin(), tensorData.end(), inputData);
         }
-        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
+        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0 ||
+                 params.m_InputTypes[inputIndex].compare("qasymmu8") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);
 
@@ -175,6 +176,26 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
 
             std::copy(tensorData.begin(), tensorData.end(), inputData);
         }
+        else if (params.m_InputTypes[inputIndex].compare("qasymms8") == 0)
+        {
+            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);
+
+            if(inputData == NULL)
+            {
+                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+                return EXIT_FAILURE;
+            }
+
+            std::vector<int8_t> tensorData;
+            PopulateTensorWithDataGeneric<int8_t>(tensorData,
+                                                  params.m_InputTensorShapes[inputIndex]->GetNumElements(),
+                                                  dataFile,
+                                                  [](const std::string& s)
+                                                  { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
+
+            std::copy(tensorData.begin(), tensorData.end(), inputData);
+        }
         else
         {
             ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
"; @@ -245,7 +266,8 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params, printf("%d ", tfLiteDelageOutputData[i]); } } - else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0) + else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0 || + params.m_OutputTypes[outputIndex].compare("qasymmu8") == 0) { auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); if(tfLiteDelageOutputData == NULL) @@ -374,13 +396,17 @@ int MainImpl(const ExecuteNetworkParams& params, if (params.m_OutputTypes[i].compare("float") == 0) { outputDataContainers.push_back(std::vector(model.GetOutputSize(i))); - } else if (params.m_OutputTypes[i].compare("int") == 0) + } + else if (params.m_OutputTypes[i].compare("int") == 0) { outputDataContainers.push_back(std::vector(model.GetOutputSize(i))); - } else if (params.m_OutputTypes[i].compare("qasymm8") == 0) + } + else if (params.m_OutputTypes[i].compare("qasymm8") == 0 || + params.m_OutputTypes[i].compare("qasymmu8") == 0) { outputDataContainers.push_back(std::vector(model.GetOutputSize(i))); - } else if (params.m_OutputTypes[i].compare("qsymms8") == 0) + } + else if (params.m_OutputTypes[i].compare("qasymms8") == 0) { outputDataContainers.push_back(std::vector(model.GetOutputSize(i))); } else -- cgit v1.2.1