From 15f7faef88357679064e0e9d3bd91dd18c7625d6 Mon Sep 17 00:00:00 2001
From: Sadik Armagan <sadik.armagan@arm.com>
Date: Wed, 18 Nov 2020 09:37:03 +0000
Subject: IVGCVSW-5377 'Add ArmNN TfLite delegate to ExecuteNetwork'

* Assign correct input values for the model
* Call the right Validate function for Mul and Sub operators
* Return the correct data type for kTfLiteInt8

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I6d23adf68d33d8be9a1fbf5d19dfe47939a6d3d6
---
 tests/ExecuteNetwork/ExecuteNetwork.cpp | 49 ++++++++++++++++++++++++---------
 1 file changed, 36 insertions(+), 13 deletions(-)

(limited to 'tests')

diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index fa84a6ee4f..ba7ce29cd7 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -77,6 +77,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
     for(unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
     {
         int input = tfLiteInterpreter->inputs()[inputIndex];
+        TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims;
+
+        long inputSize = 1;
+        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
+        {
+            inputSize *= inputDims->data[dim];
+        }
+
         if (params.m_InputTypes[inputIndex].compare("float") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<float>(input);
@@ -86,8 +94,15 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                                    params.m_InputTypes[inputIndex],
                                    armnn::EmptyOptional(),
                                    dataFile);
-            inputData = reinterpret_cast<float*>(&tensorData);
-            armnn::IgnoreUnused(inputData);
+
+            mapbox::util::apply_visitor([&](auto&& value)
+            {
+                for (unsigned int i = 0; i < inputSize; ++i)
+                {
+                    inputData[i] = value.data()[i];
+                }
+            },
+            tensorData);
         }
         else if (params.m_InputTypes[inputIndex].compare("int") == 0)
         {
@@ -98,8 +113,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                                    params.m_InputTypes[inputIndex],
                                    armnn::EmptyOptional(),
                                    dataFile);
-            inputData = reinterpret_cast<int32_t*>(&tensorData);
-            armnn::IgnoreUnused(inputData);
+            mapbox::util::apply_visitor([&](auto&& value)
+            {
+                for (unsigned int i = 0; i < inputSize; ++i)
+                {
+                    inputData[i] = value.data()[i];
+                }
+            },
+            tensorData);
         }
         else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
         {
@@ -110,8 +131,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                                    params.m_InputTypes[inputIndex],
                                    armnn::EmptyOptional(),
                                    dataFile);
-            inputData = reinterpret_cast<uint8_t*>(&tensorData);
-            armnn::IgnoreUnused(inputData);
+            mapbox::util::apply_visitor([&](auto&& value)
+            {
+                for (unsigned int i = 0; i < inputSize; ++i)
+                {
+                    inputData[i] = value.data()[i];
+                }
+            },
+            tensorData);
         }
         else
         {
@@ -128,21 +155,19 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
     // Print out the output
     for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
     {
-        std::cout << "Printing out the output" << std::endl;
         auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
-        TfLiteIntArray *outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
+        TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
 
-        int outputSize = 1;
+        long outputSize = 1;
         for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
         {
-            outputSize *=  outputDims->data[dim];
+            outputSize *= outputDims->data[dim];
         }
 
         std::cout << params.m_OutputNames[outputIndex] << ": ";
         if (params.m_OutputTypes[outputIndex].compare("float") == 0)
         {
             auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
-
             if(tfLiteDelageOutputData == NULL)
             {
                 ARMNN_LOG(fatal) << "Output tensor is null, output type: "
@@ -162,7 +187,6 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
         else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
         {
             auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
-
             if(tfLiteDelageOutputData == NULL)
             {
                 ARMNN_LOG(fatal) << "Output tensor is null, output type: "
@@ -182,7 +206,6 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
         else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
        {
             auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
-
             if(tfLiteDelageOutputData == NULL)
             {
                 ARMNN_LOG(fatal) << "Output tensor is null, output type: "
-- 
cgit v1.2.1