authorMatthew Sloyan <matthew.sloyan@arm.com>2020-12-07 13:33:24 +0000
committerJim Flynn <jim.flynn@arm.com>2020-12-08 11:56:08 +0000
commitf00f6c2c6e01cac5c452b2e585b4ceea1e85a40f (patch)
treeccd58bec8be271e6a8ab531b6eff72760ef1e135 /delegate
parentf24375df4087b7d39062d8c46b190e7abea4bc9c (diff)
downloadarmnn-f00f6c2c6e01cac5c452b2e585b4ceea1e85a40f.tar.gz
IVGCVSW-5560 Fix TfLiteDelegate Reshape operator failure
* Fixed issue when running certain models with 2D shape tensor.
* Falls back to inbuilt options if encountered.
* Fixed ExecuteNetwork so that error messages are logged if NULL.
* Updated TfLiteDelegate docs to include Logical Operators.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Signed-off-by: David Monahan <david.monahan@arm.com>
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I5dbaf30351f7fc86e6178a0caf46c152812088d3
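For readers skimming the diff below, here is a minimal sketch of the fallback behaviour this change introduces: when the Reshape operator's second input is not a 1D shape tensor, read the target shape from the TfLiteReshapeParams builtin options instead of rejecting the node. This is not the delegate's actual code; the helper name GatherReshapeTargetShape is invented for illustration, and the real logic lives in VisitReshapeOperator in delegate/src/Redefine.hpp.

    #include <tensorflow/lite/c/common.h>
    #include <tensorflow/lite/c/builtin_op_data.h>
    #include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
    #include <cstdint>
    #include <vector>

    // Illustrative sketch only: collect the Reshape target shape, preferring the
    // second input tensor but falling back to the builtin options when that
    // tensor is not 1D (e.g. the 2D shape tensors seen in some models).
    std::vector<int32_t> GatherReshapeTargetShape(const TfLiteTensor& shapeInputTensor,
                                                  const TfLiteReshapeParams* reshapeOptions)
    {
        std::vector<int32_t> targetShape;

        // Only trust the shape tensor when it is 1D, mirroring the commit's check.
        if (shapeInputTensor.dims != nullptr && shapeInputTensor.dims->size == 1)
        {
            const auto* shapeData = tflite::GetTensorData<int32_t>(&shapeInputTensor);
            const int numValues = shapeInputTensor.dims->data[0];
            for (int i = 0; i < numValues; ++i)
            {
                targetShape.push_back(shapeData[i]);
            }
        }
        else if (reshapeOptions != nullptr)
        {
            // Fallback: take the shape from the builtin reshape parameters.
            for (int i = 0; i < reshapeOptions->num_dimensions; ++i)
            {
                targetShape.push_back(reshapeOptions->shape[i]);
            }
        }
        return targetShape;
    }

The design point of the change is visible in the sketch: instead of returning kTfLiteError when the shape tensor is not 1D, the delegate now logs a warning and falls back to the builtin options, so affected models keep running on the Arm NN delegate rather than failing outright.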
Diffstat (limited to 'delegate')
-rw-r--r--  delegate/TensorFlowLiteDelegateSupport.md   6
-rw-r--r--  delegate/src/Redefine.hpp                   43
2 files changed, 23 insertions, 26 deletions
diff --git a/delegate/TensorFlowLiteDelegateSupport.md b/delegate/TensorFlowLiteDelegateSupport.md
index d94f14e485..c334018e49 100644
--- a/delegate/TensorFlowLiteDelegateSupport.md
+++ b/delegate/TensorFlowLiteDelegateSupport.md
@@ -38,6 +38,12 @@ The Arm NN SDK TensorFlow Lite delegate currently supports the following operato
* LESS_OR_EQUAL
+* LOGICAL_AND
+
+* LOGICAL_NOT
+
+* LOGICAL_OR
+
* LOGISTIC
* LOG_SOFTMAX
diff --git a/delegate/src/Redefine.hpp b/delegate/src/Redefine.hpp
index e88038362f..5e130b27f2 100644
--- a/delegate/src/Redefine.hpp
+++ b/delegate/src/Redefine.hpp
@@ -19,8 +19,8 @@ namespace armnnDelegate
{
TfLiteStatus CreateOutputTensorShape(const armnn::TensorInfo& inputTensorInfo,
- const std::vector<int32_t>& targetShape,
- armnn::ReshapeDescriptor& reshapeDesc)
+ const std::vector<int32_t>& targetShape,
+ armnn::ReshapeDescriptor& reshapeDesc)
{
std::vector<unsigned int> outputDims(targetShape.begin(), targetShape.end());
const auto stretchDim = std::find(targetShape.begin(), targetShape.end(), -1);
@@ -67,22 +67,14 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
- if (IsDynamicTensor(tfLiteInputTensor0))
+ if (!IsValid(tfLiteContext, tfLiteInputTensor0, operatorCode, nodeIndex))
{
- TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
- "operator #%d node #%d: ",
- operatorCode, nodeIndex);
return kTfLiteError;
}
const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
- if (IsDynamicTensor(tfLiteOutputTensor))
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
{
- TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic output tensors are not supported in "
- "operator #%d node #%d: ",
- operatorCode, nodeIndex);
return kTfLiteError;
}
@@ -91,18 +83,15 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
armnn::ReshapeDescriptor reshapeDesc;
std::vector<int32_t> targetShape;
+ bool shapeSet = false;
// The new shape can be defined by either a second input tensor or by a builtin option, we need to check for both.
if (numInputs == 2)
{
// Get shape from the second input tensor
const TfLiteTensor& tfLiteShapeInputTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
- if (IsDynamicTensor(tfLiteShapeInputTensor))
+ if (!IsValid(tfLiteContext, tfLiteShapeInputTensor, operatorCode, nodeIndex))
{
- TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
- "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
- "operator #%d node #%d: ",
- operatorCode, nodeIndex);
return kTfLiteError;
}
@@ -110,20 +99,22 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
{
TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
"TfLiteArmnnDelegate: Target 'shape' input is not a 1D tensor in "
- "operator #%d node #%d: ",
+ "operator #%d node #%d: Falling back to TfLiteOptions.",
operatorCode, nodeIndex);
- return kTfLiteError;
}
-
- // Get the shape data out of the input tensor
- auto* shapeTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteShapeInputTensor);
- auto shapeTensorNumValues = tfLiteShapeInputTensor.dims->data[0];
- for (auto i=0; i < shapeTensorNumValues; ++i)
+ else
{
- targetShape.push_back(*(shapeTensorDataPtr+i));
+ // Get the shape data out of the input tensor
+ auto* shapeTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteShapeInputTensor);
+ auto shapeTensorNumValues = tfLiteShapeInputTensor.dims->data[0];
+ for (auto i=0; i < shapeTensorNumValues; ++i)
+ {
+ targetShape.push_back(*(shapeTensorDataPtr+i));
+ }
+ shapeSet = true;
}
}
- else
+ if (!shapeSet)
{
// Get shape from the builtin data
TfLiteReshapeParams* reshapeOptions = reinterpret_cast<TfLiteReshapeParams*>(tfLiteNode->builtin_data);