diff options
author | Sadik Armagan <sadik.armagan@arm.com> | 2020-11-18 09:37:03 +0000 |
---|---|---|
committer | Sadik Armagan <sadik.armagan@arm.com> | 2020-11-18 09:37:03 +0000 |
commit | 15f7faef88357679064e0e9d3bd91dd18c7625d6 (patch) | |
tree | d8dcfc49dc67614feb5edf0dc1ff49b7ca2581d4 /tests | |
parent | 5d03e31aaf4d82e9f9cdc03c41d2328bbb2a0dee (diff) | |
download | armnn-15f7faef88357679064e0e9d3bd91dd18c7625d6.tar.gz |
IVGCVSW-5377 'Add ArmNN TfLite delegate to ExecuteNetwork'
* Assign correct input values for the model
* Call the right Validate function for Mul and Sub operators
* Return the correct data type for kTfLiteInt8
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I6d23adf68d33d8be9a1fbf5d19dfe47939a6d3d6
Diffstat (limited to 'tests')
-rw-r--r-- | tests/ExecuteNetwork/ExecuteNetwork.cpp | 49 |
1 file changed, 36 insertions(+), 13 deletions(-)
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp index fa84a6ee4f..ba7ce29cd7 100644 --- a/tests/ExecuteNetwork/ExecuteNetwork.cpp +++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp @@ -77,6 +77,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params, for(unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex) { int input = tfLiteInterpreter->inputs()[inputIndex]; + TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims; + + long inputSize = 1; + for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim) + { + inputSize *= inputDims->data[dim]; + } + if (params.m_InputTypes[inputIndex].compare("float") == 0) { auto inputData = tfLiteInterpreter->typed_tensor<float>(input); @@ -86,8 +94,15 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params, params.m_InputTypes[inputIndex], armnn::EmptyOptional(), dataFile); - inputData = reinterpret_cast<float*>(&tensorData); - armnn::IgnoreUnused(inputData); + + mapbox::util::apply_visitor([&](auto&& value) + { + for (unsigned int i = 0; i < inputSize; ++i) + { + inputData[i] = value.data()[i]; + } + }, + tensorData); } else if (params.m_InputTypes[inputIndex].compare("int") == 0) { @@ -98,8 +113,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params, params.m_InputTypes[inputIndex], armnn::EmptyOptional(), dataFile); - inputData = reinterpret_cast<int32_t*>(&tensorData); - armnn::IgnoreUnused(inputData); + mapbox::util::apply_visitor([&](auto&& value) + { + for (unsigned int i = 0; i < inputSize; ++i) + { + inputData[i] = value.data()[i]; + } + }, + tensorData); } else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0) { @@ -110,8 +131,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params, params.m_InputTypes[inputIndex], armnn::EmptyOptional(), dataFile); - inputData = reinterpret_cast<uint8_t*>(&tensorData); - armnn::IgnoreUnused(inputData); + 
mapbox::util::apply_visitor([&](auto&& value) + { + for (unsigned int i = 0; i < inputSize; ++i) + { + inputData[i] = value.data()[i]; + } + }, + tensorData); } else { @@ -128,21 +155,19 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params, // Print out the output for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex) { - std::cout << "Printing out the output" << std::endl; auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex]; - TfLiteIntArray *outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims; + TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims; - int outputSize = 1; + long outputSize = 1; for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim) { - outputSize *= outputDims->data[dim]; + outputSize *= outputDims->data[dim]; } std::cout << params.m_OutputNames[outputIndex] << ": "; if (params.m_OutputTypes[outputIndex].compare("float") == 0) { auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId); - if(tfLiteDelageOutputData == NULL) { ARMNN_LOG(fatal) << "Output tensor is null, output type: " @@ -162,7 +187,6 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params, else if (params.m_OutputTypes[outputIndex].compare("int") == 0) { auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId); - if(tfLiteDelageOutputData == NULL) { ARMNN_LOG(fatal) << "Output tensor is null, output type: " @@ -182,7 +206,6 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params, else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0) { auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId); - if(tfLiteDelageOutputData == NULL) { ARMNN_LOG(fatal) << "Output tensor is null, output type: " |