author     Sadik Armagan <sadik.armagan@arm.com>  2020-11-18 09:37:03 +0000
committer  Sadik Armagan <sadik.armagan@arm.com>  2020-11-18 09:37:03 +0000
commit     15f7faef88357679064e0e9d3bd91dd18c7625d6 (patch)
tree       d8dcfc49dc67614feb5edf0dc1ff49b7ca2581d4
parent     5d03e31aaf4d82e9f9cdc03c41d2328bbb2a0dee (diff)
download   armnn-15f7faef88357679064e0e9d3bd91dd18c7625d6.tar.gz
IVGCVSW-5377 'Add ArmNN TfLite delegate to ExecuteNetwork'
* Assign correct input values for the model
* Call the right Validate function for Mul and Sub operators
* Return the correct data type for kTfLiteInt8

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I6d23adf68d33d8be9a1fbf5d19dfe47939a6d3d6
-rw-r--r--  delegate/src/DelegateUtils.hpp           | 16
-rw-r--r--  delegate/src/ElementwiseBinary.hpp       |  4
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp  | 49
3 files changed, 52 insertions(+), 17 deletions(-)
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 0537ba911b..fad07ff267 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -342,14 +342,26 @@ armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
         case kTfLiteUInt8:
             return armnn::DataType::QAsymmU8;
         case kTfLiteInt8:
-            if (tfLiteTensor.params.zero_point == 0)
+        {
+            auto quantizationInfo = tfLiteTensor.quantization;
+            if (quantizationInfo.type == kTfLiteAffineQuantization)
             {
-                return armnn::DataType::QSymmS8;
+                auto* quantization =
+                    reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
+                if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
+                {
+                    return armnn::DataType::QAsymmS8;
+                }
+                else
+                {
+                    return armnn::DataType::QSymmS8;
+                }
             }
             else
             {
                 return armnn::DataType::QAsymmS8;
             }
+        }
         case kTfLiteInt16:
             return armnn::DataType::QSymmS16;
         case kTfLiteInt32:
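The kTfLiteInt8 change keys the ArmNN data type off the tensor's affine quantization metadata instead of the flattened params.zero_point field. Below is a minimal standalone sketch of that decision rule; the structs and the enum are simplified stand-ins for the real TfLite and ArmNN types (which live in TfLite's C headers and armnn/Types.hpp), not the delegate's actual code:

#include <cstdio>

// Simplified stand-ins for the TfLite structs referenced in the hunk above.
struct IntArrayStub { int size; int data[1]; };
struct AffineQuantizationStub { IntArrayStub* zero_point; };

enum class ArmnnInt8Type { QAsymmS8, QSymmS8 };

// Mirrors the patched logic: with affine quantization metadata, a single
// (per-tensor) zero point means asymmetric int8, anything else falls back
// to symmetric int8; without affine metadata the tensor is treated as
// asymmetric, exactly as in the new else branch.
ArmnnInt8Type ClassifyInt8(bool hasAffineQuantization, const AffineQuantizationStub* q)
{
    if (hasAffineQuantization)
    {
        if (q != nullptr && q->zero_point != nullptr && q->zero_point->size == 1)
        {
            return ArmnnInt8Type::QAsymmS8;
        }
        return ArmnnInt8Type::QSymmS8;
    }
    return ArmnnInt8Type::QAsymmS8;
}

int main()
{
    IntArrayStub perTensorZeroPoint{1, {10}};
    AffineQuantizationStub asymm{&perTensorZeroPoint};
    AffineQuantizationStub noZeroPoint{nullptr};

    std::printf("affine, per-tensor zero point -> %s\n",
                ClassifyInt8(true, &asymm) == ArmnnInt8Type::QAsymmS8 ? "QAsymmS8" : "QSymmS8");
    std::printf("affine, no zero point         -> %s\n",
                ClassifyInt8(true, &noZeroPoint) == ArmnnInt8Type::QAsymmS8 ? "QAsymmS8" : "QSymmS8");
    return 0;
}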
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index e5270057f5..49a5dfb0d9 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -228,13 +228,13 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
                                        inputTensorInfo1,
                                        outputTensorInfo);
         case kTfLiteBuiltinMul:
-            return ValidateDivOperator(delegateData,
+            return ValidateMulOperator(delegateData,
                                        tfLiteContext,
                                        inputTensorInfo0,
                                        inputTensorInfo1,
                                        outputTensorInfo);
         case kTfLiteBuiltinSub:
-            return ValidateDivOperator(delegateData,
+            return ValidateSubOperator(delegateData,
                                        tfLiteContext,
                                        inputTensorInfo0,
                                        inputTensorInfo1,
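This hunk fixes a copy-paste bug: the kTfLiteBuiltinMul and kTfLiteBuiltinSub cases both dispatched to ValidateDivOperator, so the delegate queried backend support for the wrong layer type. A table keyed by operator, as in the hypothetical sketch below, is one way to make such pairings harder to get wrong; the Validate* functions here are illustrative stubs, not the delegate's real helpers:

#include <cstdio>
#include <functional>
#include <map>
#include <string>

// Hypothetical stand-ins: in the delegate each validator queries support
// for its own layer type, which is why routing Mul/Sub through the Div
// validator was a real bug rather than a harmless alias.
bool ValidateMul() { std::puts("checking Mul support"); return true; }
bool ValidateSub() { std::puts("checking Sub support"); return true; }
bool ValidateDiv() { std::puts("checking Div support"); return true; }

int main()
{
    // The explicit operator-to-validator pairing is visible at a glance,
    // unlike a hand-copied switch where a stale function name can hide.
    const std::map<std::string, std::function<bool()>> validators = {
        {"MUL", ValidateMul},
        {"SUB", ValidateSub},
        {"DIV", ValidateDiv},
    };
    for (const auto& [op, validate] : validators)
    {
        std::printf("%s: ", op.c_str());
        validate();
    }
    return 0;
}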
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index fa84a6ee4f..ba7ce29cd7 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -77,6 +77,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
     for(unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
     {
         int input = tfLiteInterpreter->inputs()[inputIndex];
+        TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims;
+
+        long inputSize = 1;
+        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
+        {
+            inputSize *= inputDims->data[dim];
+        }
+
         if (params.m_InputTypes[inputIndex].compare("float") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<float>(input);
@@ -86,8 +94,15 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                                               params.m_InputTypes[inputIndex],
                                               armnn::EmptyOptional(),
                                               dataFile);
-            inputData = reinterpret_cast<float*>(&tensorData);
-            armnn::IgnoreUnused(inputData);
+
+            mapbox::util::apply_visitor([&](auto&& value)
+            {
+                for (unsigned int i = 0; i < inputSize; ++i)
+                {
+                    inputData[i] = value.data()[i];
+                }
+            },
+            tensorData);
         }
         else if (params.m_InputTypes[inputIndex].compare("int") == 0)
         {
else if (params.m_InputTypes[inputIndex].compare("int") == 0)
{
@@ -98,8 +113,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                                               params.m_InputTypes[inputIndex],
                                               armnn::EmptyOptional(),
                                               dataFile);
-            inputData = reinterpret_cast<int32_t*>(&tensorData);
-            armnn::IgnoreUnused(inputData);
+            mapbox::util::apply_visitor([&](auto&& value)
+            {
+                for (unsigned int i = 0; i < inputSize; ++i)
+                {
+                    inputData[i] = value.data()[i];
+                }
+            },
+            tensorData);
         }
         else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
         {
else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
{
@@ -110,8 +131,14 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                                               params.m_InputTypes[inputIndex],
                                               armnn::EmptyOptional(),
                                               dataFile);
-            inputData = reinterpret_cast<uint8_t*>(&tensorData);
-            armnn::IgnoreUnused(inputData);
+            mapbox::util::apply_visitor([&](auto&& value)
+            {
+                for (unsigned int i = 0; i < inputSize; ++i)
+                {
+                    inputData[i] = value.data()[i];
+                }
+            },
+            tensorData);
         }
         else
         {
else
{
@@ -128,21 +155,19 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
     // Print out the output
     for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
     {
-        std::cout << "Printing out the output" << std::endl;
         auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
-        TfLiteIntArray *outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
+        TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;

-        int outputSize = 1;
+        long outputSize = 1;
         for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
         {
-           outputSize *= outputDims->data[dim];
+            outputSize *= outputDims->data[dim];
         }

         std::cout << params.m_OutputNames[outputIndex] << ": ";
         if (params.m_OutputTypes[outputIndex].compare("float") == 0)
         {
             auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
-
             if(tfLiteDelageOutputData == NULL)
             {
                 ARMNN_LOG(fatal) << "Output tensor is null, output type: "
@@ -162,7 +187,6 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
         else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
         {
             auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
-
             if(tfLiteDelageOutputData == NULL)
             {
                 ARMNN_LOG(fatal) << "Output tensor is null, output type: "
@@ -182,7 +206,6 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
         else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
         {
             auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
-
             if(tfLiteDelageOutputData == NULL)
             {
                 ARMNN_LOG(fatal) << "Output tensor is null, output type: "
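The ExecuteNetwork change replaces a reinterpret_cast of the variant object's address (which left the interpreter's tensor unpopulated) with an element-wise copy performed under mapbox::util::apply_visitor, sized by the product of the tensor's dimensions. A minimal sketch of the same pattern using the standard library's std::variant/std::visit in place of mapbox's types; the variant alternatives, dims, and buffer below are illustrative, not the delegate's actual types:

#include <cstdint>
#include <cstdio>
#include <variant>
#include <vector>

// Analogue of the variant-of-vectors that the test's data-loading helper
// returns (assumption: the real code uses mapbox::util::variant, with
// mapbox::util::apply_visitor playing the role of std::visit here).
using TensorData = std::variant<std::vector<float>,
                                std::vector<int32_t>,
                                std::vector<uint8_t>>;

int main()
{
    // Element count derived from the tensor dims, as in the patch.
    const std::vector<int> dims = {1, 2, 2};
    long inputSize = 1;
    for (int d : dims) { inputSize *= d; }

    float inputData[4] = {};  // stands in for typed_tensor<float>(input)
    TensorData tensorData = std::vector<float>{0.5f, 1.5f, 2.5f, 3.5f};

    // Copy the variant's payload into the interpreter-owned buffer instead
    // of reinterpreting the variant's own address, which was the bug fixed
    // by this commit.
    std::visit([&](auto&& value)
    {
        for (long i = 0; i < inputSize; ++i)
        {
            inputData[i] = static_cast<float>(value.data()[i]);
        }
    },
    tensorData);

    for (long i = 0; i < inputSize; ++i)
    {
        std::printf("%f\n", inputData[i]);
    }
    return 0;
}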