From 15f7faef88357679064e0e9d3bd91dd18c7625d6 Mon Sep 17 00:00:00 2001
From: Sadik Armagan <sadik.armagan@arm.com>
Date: Wed, 18 Nov 2020 09:37:03 +0000
Subject: IVGCVSW-5377 'Add ArmNN TfLite delegate to ExecuteNetwork'

* Assign correct input values for the model
* Call the right Validate function for Mul and Sub operators
* Return the correct data type for kTfLiteInt8

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I6d23adf68d33d8be9a1fbf5d19dfe47939a6d3d6
---
 delegate/src/DelegateUtils.hpp          | 16 +++++++++--
 delegate/src/ElementwiseBinary.hpp      |  4 +--
 tests/ExecuteNetwork/ExecuteNetwork.cpp | 49 ++++++++++++++++++++++++---------
 3 files changed, 52 insertions(+), 17 deletions(-)

diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 0537ba911b..fad07ff267 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -342,14 +342,26 @@ armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
         case kTfLiteUInt8:
             return armnn::DataType::QAsymmU8;
         case kTfLiteInt8:
-            if (tfLiteTensor.params.zero_point == 0)
+        {
+            auto quantizationInfo = tfLiteTensor.quantization;
+            if (quantizationInfo.type == kTfLiteAffineQuantization)
             {
-                return armnn::DataType::QSymmS8;
+                auto* quantization =
+                    reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
+                if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
+                {
+                    return armnn::DataType::QAsymmS8;
+                }
+                else
+                {
+                    return armnn::DataType::QSymmS8;
+                }
             }
             else
             {
                 return armnn::DataType::QAsymmS8;
             }
+        }
         case kTfLiteInt16:
             return armnn::DataType::QSymmS16;
         case kTfLiteInt32:
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index e5270057f5..49a5dfb0d9 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -228,13 +228,13 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
                                        inputTensorInfo1,
                                        outputTensorInfo);
         case kTfLiteBuiltinMul:
-            return ValidateDivOperator(delegateData,
+            return ValidateMulOperator(delegateData,
                                        tfLiteContext,
                                        inputTensorInfo0,
                                        inputTensorInfo1,
                                        outputTensorInfo);
         case kTfLiteBuiltinSub:
-            return ValidateDivOperator(delegateData,
+            return ValidateSubOperator(delegateData,
                                        tfLiteContext,
                                        inputTensorInfo0,
                                        inputTensorInfo1,
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index fa84a6ee4f..ba7ce29cd7 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -77,6 +77,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
     for(unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
     {
         int input = tfLiteInterpreter->inputs()[inputIndex];
+        TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims;
+
+        long inputSize = 1;
+        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
+        {
+            inputSize *= inputDims->data[dim];
+        }
+
         if (params.m_InputTypes[inputIndex].compare("float") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<float>(input);
@@ -86,8 +94,15 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                                                       params.m_InputTypes[inputIndex],
                                                       armnn::EmptyOptional(),
                                                       dataFile);
-            inputData = reinterpret_cast<float*>(&tensorData);
-            armnn::IgnoreUnused(inputData);
+
+            mapbox::util::apply_visitor([&](auto&& value)
+            {
+                for (unsigned int i = 0; i < inputSize; ++i)
+                {
+                    inputData[i] = value.data()[i];
+                }
+            },
+            tensorData);
         }
         else if (params.m_InputTypes[inputIndex].compare("int") == 0)
         {
@@ -98,8 +113,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                                                       params.m_InputTypes[inputIndex],
                                                       armnn::EmptyOptional(),
                                                       dataFile);
-            inputData = reinterpret_cast<int32_t*>(&tensorData);
-            armnn::IgnoreUnused(inputData);
+            mapbox::util::apply_visitor([&](auto&& value)
+            {
+                for (unsigned int i = 0; i < inputSize; ++i)
+                {
+                    inputData[i] = value.data()[i];
+                }
+            },
+            tensorData);
         }
         else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
         {
@@ -110,8 +131,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                                                       params.m_InputTypes[inputIndex],
                                                       armnn::EmptyOptional(),
                                                       dataFile);
-            inputData = reinterpret_cast<uint8_t*>(&tensorData);
-            armnn::IgnoreUnused(inputData);
+            mapbox::util::apply_visitor([&](auto&& value)
+            {
+                for (unsigned int i = 0; i < inputSize; ++i)
+                {
+                    inputData[i] = value.data()[i];
+                }
+            },
+            tensorData);
         }
         else
         {
@@ -128,21 +155,19 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
     // Print out the output
     for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
     {
-        std::cout << "Printing out the output" << std::endl;
         auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
-        TfLiteIntArray *outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
+        TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;

-        int outputSize = 1;
+        long outputSize = 1;
         for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
         {
-            outputSize *= outputDims->data[dim];
+            outputSize *= outputDims->data[dim];
         }

         std::cout << params.m_OutputNames[outputIndex] << ": ";
         if (params.m_OutputTypes[outputIndex].compare("float") == 0)
         {
             auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
-
             if(tfLiteDelageOutputData == NULL)
             {
                 ARMNN_LOG(fatal) << "Output tensor is null, output type: "
@@ -162,7 +187,6 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
         else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
         {
             auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
-
             if(tfLiteDelageOutputData == NULL)
             {
                 ARMNN_LOG(fatal) << "Output tensor is null, output type: "
@@ -182,7 +206,6 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
         else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
         {
             auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
-
             if(tfLiteDelageOutputData == NULL)
             {
                 ARMNN_LOG(fatal) << "Output tensor is null, output type: "
-- 
cgit v1.2.1