about summary refs log tree commit diff
path: root/tests/ExecuteNetwork/ExecuteNetwork.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'tests/ExecuteNetwork/ExecuteNetwork.cpp')
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp  26
1 files changed, 4 insertions, 22 deletions
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index 085721c6bb..f321a26009 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -154,7 +154,8 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params, const armnn::IRun
std::copy(tensorData.begin(), tensorData.end(), inputData);
}
- else if (params.m_InputTypes[inputIndex].compare("qsymms8") == 0)
+ else if (params.m_InputTypes[inputIndex].compare("qsymms8") == 0 ||
+ params.m_InputTypes[inputIndex].compare("qasymms8") == 0)
{
auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);
@@ -215,26 +216,6 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params, const armnn::IRun
std::copy(tensorData.begin(), tensorData.end(), inputData);
}
- else if (params.m_InputTypes[inputIndex].compare("qasymms8") == 0)
- {
- auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);
-
- if(inputData == NULL)
- {
- ARMNN_LOG(fatal) << "Input tensor is null, input type: "
- "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
- return EXIT_FAILURE;
- }
-
- std::vector<int8_t> tensorData;
- PopulateTensorWithDataGeneric<int8_t>(tensorData,
- inputSize,
- dataFile,
- [](const std::string& s)
- { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
-
- std::copy(tensorData.begin(), tensorData.end(), inputData);
- }
else
{
ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
@@ -339,7 +320,8 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params, const armnn::IRun
}
}
}
- else if (params.m_OutputTypes[outputIndex].compare("qsymms8") == 0)
+ else if (params.m_OutputTypes[outputIndex].compare("qsymms8") == 0 ||
+ params.m_OutputTypes[outputIndex].compare("qasymms8") == 0)
{
auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
if(tfLiteDelageOutputData == NULL)