author    Sadik Armagan <sadik.armagan@arm.com>    2022-02-09 14:58:03 +0000
committer Sadik Armagan <sadik.armagan@arm.com>    2022-02-11 10:33:08 +0000
commit    bfa767ca56f9776e7dd3eecb4025cfeed87f9936 (patch)
tree      893be201ca8f5cb3f7b5cb9cec377588567fa6a2
parent    eef6b76fedad6ba812c4eae74266c2828f9e8de4 (diff)
download  armnn-bfa767ca56f9776e7dd3eecb4025cfeed87f9936.tar.gz
MLCE-754 'Improve operator support error/warning from Arm NN Delegate'
* Improved error reporting on armnn_delegate

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I1bd131fb56d64b32b1fafad0465256178720226c
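The warnings touched by this change are emitted while the delegate decides which nodes it can take, i.e. during ModifyGraphWithDelegate. A minimal sketch of applying the delegate so these messages appear (standard armnnDelegate API; the model file name and CpuAcc backend are illustrative choices):

// Minimal sketch: run a TfLite model with the Arm NN delegate attached.
// The model file name and the CpuAcc backend are illustrative.
#include <armnn_delegate.hpp>

#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>

#include <memory>

int main()
{
    auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
    tflite::ops::builtin::BuiltinOpResolver resolver;
    std::unique_ptr<tflite::Interpreter> interpreter;
    tflite::InterpreterBuilder(*model, resolver)(&interpreter);

    // Create the Arm NN delegate for a single backend.
    armnnDelegate::DelegateOptions delegateOptions(armnn::Compute::CpuAcc);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);

    // Unsupported operators are reported here, via TF_LITE_KERNEL_LOG and
    // TFLITE_LOG_PROD, while the delegate partitions the graph; rejected
    // nodes stay on the default TfLite runtime.
    interpreter->ModifyGraphWithDelegate(theArmnnDelegate.get());
    return 0;
}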
-rw-r--r--  delegate/src/Activation.hpp                    2
-rw-r--r--  delegate/src/ArgMinMax.hpp                     2
-rw-r--r--  delegate/src/BatchSpace.hpp                    4
-rw-r--r--  delegate/src/Comparison.hpp                    2
-rw-r--r--  delegate/src/Control.hpp                       4
-rw-r--r--  delegate/src/Convolution.hpp                   8
-rw-r--r--  delegate/src/DelegateUtils.hpp                 18
-rw-r--r--  delegate/src/ElementwiseBinary.hpp             12
-rw-r--r--  delegate/src/ElementwiseUnary.hpp              2
-rw-r--r--  delegate/src/Fill.hpp                          2
-rw-r--r--  delegate/src/FullyConnected.hpp                2
-rw-r--r--  delegate/src/Gather.hpp                        2
-rw-r--r--  delegate/src/LogicalBinary.hpp                 2
-rw-r--r--  delegate/src/Lstm.hpp                          2
-rw-r--r--  delegate/src/Normalization.hpp                 4
-rw-r--r--  delegate/src/Pack.hpp                          2
-rw-r--r--  delegate/src/Pad.hpp                           2
-rw-r--r--  delegate/src/Pooling.hpp                       2
-rw-r--r--  delegate/src/Prelu.hpp                         2
-rw-r--r--  delegate/src/Quantization.hpp                  4
-rw-r--r--  delegate/src/Redefine.hpp                      4
-rw-r--r--  delegate/src/Reduce.hpp                        2
-rw-r--r--  delegate/src/Resize.hpp                        2
-rw-r--r--  delegate/src/Shape.hpp                         2
-rw-r--r--  delegate/src/SharedFunctions.cpp               2
-rw-r--r--  delegate/src/Slice.hpp                         2
-rw-r--r--  delegate/src/Softmax.hpp                       4
-rw-r--r--  delegate/src/SpaceDepth.hpp                    4
-rw-r--r--  delegate/src/Split.hpp                         4
-rw-r--r--  delegate/src/Transpose.hpp                     2
-rw-r--r--  delegate/src/UnidirectionalSequenceLstm.hpp    2
-rw-r--r--  delegate/src/Unpack.hpp                        4
-rw-r--r--  delegate/src/armnn_delegate.cpp                12
33 files changed, 69 insertions(+), 57 deletions(-)
diff --git a/delegate/src/Activation.hpp b/delegate/src/Activation.hpp
index 5f14e2c45c..5ac92f8802 100644
--- a/delegate/src/Activation.hpp
+++ b/delegate/src/Activation.hpp
@@ -24,7 +24,7 @@ TfLiteStatus ValidateActivationOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
tfLiteContext,
IsActivationSupported,
delegateData.m_Backends,
diff --git a/delegate/src/ArgMinMax.hpp b/delegate/src/ArgMinMax.hpp
index 54994dfef7..0b0b13d6ec 100644
--- a/delegate/src/ArgMinMax.hpp
+++ b/delegate/src/ArgMinMax.hpp
@@ -93,7 +93,7 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("ARGMINMAX",
tfLiteContext,
IsArgMinMaxSupported,
delegateData.m_Backends,
diff --git a/delegate/src/BatchSpace.hpp b/delegate/src/BatchSpace.hpp
index 318806feef..5f8d5cc317 100644
--- a/delegate/src/BatchSpace.hpp
+++ b/delegate/src/BatchSpace.hpp
@@ -74,7 +74,7 @@ TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("BATCH_TO_SPACE_ND",
tfLiteContext,
IsBatchToSpaceNdSupported,
delegateData.m_Backends,
@@ -165,7 +165,7 @@ TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("SPACE_TO_BATCH_ND",
tfLiteContext,
IsSpaceToBatchNdSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Comparison.hpp b/delegate/src/Comparison.hpp
index 2e6a7db4b6..ce12e9f7c3 100644
--- a/delegate/src/Comparison.hpp
+++ b/delegate/src/Comparison.hpp
@@ -91,7 +91,7 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("COMPARISON",
tfLiteContext,
IsComparisonSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp
index f20b9f0cc3..7e02de1bdf 100644
--- a/delegate/src/Control.hpp
+++ b/delegate/src/Control.hpp
@@ -121,7 +121,7 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("CONCATENATION",
tfLiteContext,
IsConcatSupported,
delegateData.m_Backends,
@@ -260,7 +260,7 @@ TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("MEAN",
tfLiteContext,
IsMeanSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp
index a7d6c1de26..1b5ed40f0c 100644
--- a/delegate/src/Convolution.hpp
+++ b/delegate/src/Convolution.hpp
@@ -147,7 +147,7 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
if (!delegateData.m_Network)
{
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("CONV2D",
tfLiteContext,
IsConvolution2dSupported,
delegateData.m_Backends,
@@ -300,7 +300,7 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
if (!delegateData.m_Network)
{
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("CONV3D",
tfLiteContext,
IsConvolution3dSupported,
delegateData.m_Backends,
@@ -500,7 +500,7 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
if (!delegateData.m_Network)
{
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("DEPTHWISE_CONV2D",
tfLiteContext,
IsDepthwiseConvolutionSupported,
delegateData.m_Backends,
@@ -695,7 +695,7 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
if (!delegateData.m_Network)
{
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE_CONV2D",
tfLiteContext,
IsTransposeConvolution2dSupported,
delegateData.m_Backends,
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 678a3db002..e0ba1cf4e7 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -25,7 +25,7 @@ namespace
{
// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
-#define FORWARD_LAYER_SUPPORT_FUNC(funcName, tfLiteContext, func, backends, supported, ...) \
+#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, ...) \
try \
{ \
for (auto&& backendId : backends) \
@@ -45,23 +45,23 @@ try \
if (reasonIfUnsupported.size() > 0) \
{ \
TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
- "%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
+ "%s: not supported by armnn: %s", opName, reasonIfUnsupported.c_str()); \
} \
else \
{ \
TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
- "%s: not supported by armnn", funcName); \
+ "%s: not supported by armnn", opName); \
} \
} \
} \
else \
{ \
- TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
+ TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", opName, backendId.Get().c_str()); \
} \
} \
if (!supported) \
{ \
- TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", funcName); \
+ TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", opName); \
} \
} \
catch (const armnn::InvalidArgumentException &e) \
@@ -224,7 +224,7 @@ armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
armnn::ReshapeDescriptor reshapeDescriptor;
reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
tfLiteContext,
IsReshapeSupported,
delegateData.m_Backends,
@@ -331,7 +331,7 @@ TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
}
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
tfLiteContext,
IsActivationSupported,
data.m_Backends,
@@ -561,7 +561,7 @@ TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
{
IgnoreUnused(layer);
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
tfLiteContext,
IsConstantSupported,
data.m_Backends,
@@ -608,7 +608,7 @@ TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
{
armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
tfLiteContext,
IsConstantSupported,
delegateData.m_Backends,
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index 0534c070be..434694c40a 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -27,7 +27,7 @@ TfLiteStatus ValidateAddOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("ADD",
tfLiteContext,
IsAdditionSupported,
delegateData.m_Backends,
@@ -51,7 +51,7 @@ TfLiteStatus ValidateDivOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("DIV",
tfLiteContext,
IsDivisionSupported,
delegateData.m_Backends,
@@ -103,7 +103,7 @@ TfLiteStatus ValidateMaximumOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("MAXIMUM",
tfLiteContext,
IsMaximumSupported,
delegateData.m_Backends,
@@ -126,7 +126,7 @@ TfLiteStatus ValidateMinimumOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("MINIMUM",
tfLiteContext,
IsMinimumSupported,
delegateData.m_Backends,
@@ -149,7 +149,7 @@ TfLiteStatus ValidateMulOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("MUL",
tfLiteContext,
IsMultiplicationSupported,
delegateData.m_Backends,
@@ -172,7 +172,7 @@ TfLiteStatus ValidateSubOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("SUB",
tfLiteContext,
IsSubtractionSupported,
delegateData.m_Backends,
diff --git a/delegate/src/ElementwiseUnary.hpp b/delegate/src/ElementwiseUnary.hpp
index f2f5301635..c65ddefa22 100644
--- a/delegate/src/ElementwiseUnary.hpp
+++ b/delegate/src/ElementwiseUnary.hpp
@@ -54,7 +54,7 @@ TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("ELEMENTWISE_UNARY",
tfLiteContext,
IsElementwiseUnarySupported,
delegateData.m_Backends,
diff --git a/delegate/src/Fill.hpp b/delegate/src/Fill.hpp
index c6f94dd83c..859f5fffc2 100644
--- a/delegate/src/Fill.hpp
+++ b/delegate/src/Fill.hpp
@@ -74,7 +74,7 @@ TfLiteStatus VisitFillOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("FILL",
tfLiteContext,
IsFillSupported,
delegateData.m_Backends,
diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp
index 49686d6eaf..18db550e5c 100644
--- a/delegate/src/FullyConnected.hpp
+++ b/delegate/src/FullyConnected.hpp
@@ -112,7 +112,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("FULLY_CONNECTED",
tfLiteContext,
IsFullyConnectedSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Gather.hpp b/delegate/src/Gather.hpp
index 634373a341..11f3b615ce 100644
--- a/delegate/src/Gather.hpp
+++ b/delegate/src/Gather.hpp
@@ -73,7 +73,7 @@ TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
{
// Check if supported
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("GATHER",
tfLiteContext,
IsGatherSupported,
delegateData.m_Backends,
diff --git a/delegate/src/LogicalBinary.hpp b/delegate/src/LogicalBinary.hpp
index d877585849..2e9091b9d7 100644
--- a/delegate/src/LogicalBinary.hpp
+++ b/delegate/src/LogicalBinary.hpp
@@ -54,7 +54,7 @@ TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("LOGICAL_BINARY",
tfLiteContext,
IsLogicalBinarySupported,
delegateData.m_Backends,
diff --git a/delegate/src/Lstm.hpp b/delegate/src/Lstm.hpp
index 8d719ee351..565c4817c0 100644
--- a/delegate/src/Lstm.hpp
+++ b/delegate/src/Lstm.hpp
@@ -218,7 +218,7 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("LSTM",
tfLiteContext,
IsLstmSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Normalization.hpp b/delegate/src/Normalization.hpp
index 68ff3af32d..fa1c547bd4 100644
--- a/delegate/src/Normalization.hpp
+++ b/delegate/src/Normalization.hpp
@@ -44,7 +44,7 @@ TfLiteStatus VisitL2NormalizationOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("L2_NORMALIZATION",
tfLiteContext,
IsL2NormalizationSupported,
delegateData.m_Backends,
@@ -114,7 +114,7 @@ TfLiteStatus VisitLocalResponseNormalizationOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("NORMALIZATION",
tfLiteContext,
IsNormalizationSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Pack.hpp b/delegate/src/Pack.hpp
index 5e93ba3f2a..2dc266bc47 100644
--- a/delegate/src/Pack.hpp
+++ b/delegate/src/Pack.hpp
@@ -76,7 +76,7 @@ TfLiteStatus VisitPackOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("STACK",
tfLiteContext,
IsStackSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Pad.hpp b/delegate/src/Pad.hpp
index 78e07760fb..84280797b1 100644
--- a/delegate/src/Pad.hpp
+++ b/delegate/src/Pad.hpp
@@ -152,7 +152,7 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
if (!delegateData.m_Network)
{
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("PAD",
tfLiteContext,
IsPadSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Pooling.hpp b/delegate/src/Pooling.hpp
index 07c1946d73..4095ac4ac2 100644
--- a/delegate/src/Pooling.hpp
+++ b/delegate/src/Pooling.hpp
@@ -85,7 +85,7 @@ TfLiteStatus VisitPoolingOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("POOLING_2D",
tfLiteContext,
IsPooling2dSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Prelu.hpp b/delegate/src/Prelu.hpp
index 9baeaf475d..aef70e8e5b 100644
--- a/delegate/src/Prelu.hpp
+++ b/delegate/src/Prelu.hpp
@@ -24,7 +24,7 @@ TfLiteStatus ValidatePreluOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("PRELU",
tfLiteContext,
IsPreluSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Quantization.hpp b/delegate/src/Quantization.hpp
index 3c274c6ff5..26a933953a 100644
--- a/delegate/src/Quantization.hpp
+++ b/delegate/src/Quantization.hpp
@@ -52,7 +52,7 @@ TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("DEQUANTIZE",
tfLiteContext,
IsDequantizeSupported,
delegateData.m_Backends,
@@ -131,7 +131,7 @@ TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("QUANTIZE",
tfLiteContext,
IsQuantizeSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Redefine.hpp b/delegate/src/Redefine.hpp
index 766e600c7c..ce25f7f18b 100644
--- a/delegate/src/Redefine.hpp
+++ b/delegate/src/Redefine.hpp
@@ -46,7 +46,7 @@ TfLiteStatus VisitCastOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("CAST",
tfLiteContext,
IsCastSupported,
delegateData.m_Backends,
@@ -212,7 +212,7 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
tfLiteContext,
IsReshapeSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Reduce.hpp b/delegate/src/Reduce.hpp
index bcea16fdb0..b8db04ccf2 100644
--- a/delegate/src/Reduce.hpp
+++ b/delegate/src/Reduce.hpp
@@ -107,7 +107,7 @@ TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("REDUCE",
tfLiteContext,
IsReduceSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Resize.hpp b/delegate/src/Resize.hpp
index 937cc00c8f..63f8e6871e 100644
--- a/delegate/src/Resize.hpp
+++ b/delegate/src/Resize.hpp
@@ -28,7 +28,7 @@ TfLiteStatus ValidateResizeOperator(DelegateData& delegateData,
const armnn::ResizeDescriptor& descriptor)
{
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("RESIZE",
tfLiteContext,
IsResizeSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Shape.hpp b/delegate/src/Shape.hpp
index b173299a62..51aa8f143b 100644
--- a/delegate/src/Shape.hpp
+++ b/delegate/src/Shape.hpp
@@ -54,7 +54,7 @@ TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("SHAPE",
tfLiteContext,
IsShapeSupported,
delegateData.m_Backends,
diff --git a/delegate/src/SharedFunctions.cpp b/delegate/src/SharedFunctions.cpp
index 79b9f9061f..ad5d3101a2 100644
--- a/delegate/src/SharedFunctions.cpp
+++ b/delegate/src/SharedFunctions.cpp
@@ -24,7 +24,7 @@ TfLiteStatus ValidateFloorOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("FLOOR",
tfLiteContext,
IsFloorSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Slice.hpp b/delegate/src/Slice.hpp
index a237034bb6..04b19cb660 100644
--- a/delegate/src/Slice.hpp
+++ b/delegate/src/Slice.hpp
@@ -116,7 +116,7 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("SLICE",
tfLiteContext,
IsStridedSliceSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Softmax.hpp b/delegate/src/Softmax.hpp
index 0de8e1438c..f2570910fb 100644
--- a/delegate/src/Softmax.hpp
+++ b/delegate/src/Softmax.hpp
@@ -22,7 +22,7 @@ TfLiteStatus ValidateSoftmaxOperator(DelegateData& delegateData,
const armnn::SoftmaxDescriptor& descriptor)
{
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("SOFTMAX",
tfLiteContext,
IsSoftmaxSupported,
delegateData.m_Backends,
@@ -41,7 +41,7 @@ TfLiteStatus ValidateLogSoftmaxOperator(DelegateData& delegateData,
const armnn::LogSoftmaxDescriptor& descriptor)
{
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("LOG_SOFTMAX",
tfLiteContext,
IsLogSoftmaxSupported,
delegateData.m_Backends,
diff --git a/delegate/src/SpaceDepth.hpp b/delegate/src/SpaceDepth.hpp
index 03859b6fcb..ac162fbf54 100644
--- a/delegate/src/SpaceDepth.hpp
+++ b/delegate/src/SpaceDepth.hpp
@@ -45,7 +45,7 @@ TfLiteStatus VisitSpaceToDepthOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("SPACE_TO_DEPTH",
tfLiteContext,
IsSpaceToDepthSupported,
delegateData.m_Backends,
@@ -104,7 +104,7 @@ TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("DEPTH_TO_SPACE",
tfLiteContext,
IsDepthToSpaceSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Split.hpp b/delegate/src/Split.hpp
index ad55e53ef2..66e229562a 100644
--- a/delegate/src/Split.hpp
+++ b/delegate/src/Split.hpp
@@ -117,7 +117,7 @@ TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
{
// Check if supported
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("SPLIT",
tfLiteContext,
IsSplitterSupported,
delegateData.m_Backends,
@@ -309,7 +309,7 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
{
// Check if supported
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("SPLIT",
tfLiteContext,
IsSplitterSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Transpose.hpp b/delegate/src/Transpose.hpp
index 580e0624e3..0741ad285f 100644
--- a/delegate/src/Transpose.hpp
+++ b/delegate/src/Transpose.hpp
@@ -74,7 +74,7 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE",
tfLiteContext,
IsTransposeSupported,
delegateData.m_Backends,
diff --git a/delegate/src/UnidirectionalSequenceLstm.hpp b/delegate/src/UnidirectionalSequenceLstm.hpp
index 8aff39381e..a923874a74 100644
--- a/delegate/src/UnidirectionalSequenceLstm.hpp
+++ b/delegate/src/UnidirectionalSequenceLstm.hpp
@@ -224,7 +224,7 @@ TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData,
bool isSupported = false;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("UNIDIRECTIONAL_SEQUENCE_LSTM",
tfLiteContext,
IsUnidirectionalSequenceLstmSupported,
delegateData.m_Backends,
diff --git a/delegate/src/Unpack.hpp b/delegate/src/Unpack.hpp
index 4163163243..447a4da9ab 100644
--- a/delegate/src/Unpack.hpp
+++ b/delegate/src/Unpack.hpp
@@ -137,7 +137,7 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
{
// Check if splitter is supported
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("UNPACK",
tfLiteContext,
IsSplitterSupported,
delegateData.m_Backends,
@@ -156,7 +156,7 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
if (!delegateData.m_Network)
{
bool isSupported = false;
- FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
tfLiteContext,
IsReshapeSupported,
delegateData.m_Backends,
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
index ed19b72787..03db4a17f8 100644
--- a/delegate/src/armnn_delegate.cpp
+++ b/delegate/src/armnn_delegate.cpp
@@ -183,6 +183,9 @@ TfLiteIntArray* Delegate::IdentifyOperatorsToDelegate(TfLiteContext* tfLiteConte
TfLiteIntArray* nodesToDelegate = TfLiteIntArrayCreate(executionPlan->size);
nodesToDelegate->size = 0;
+
+ std::set<int32_t> unsupportedOperators;
+
for (int i = 0; i < executionPlan->size; ++i)
{
const int nodeIndex = executionPlan->data[i];
@@ -203,12 +206,21 @@ TfLiteIntArray* Delegate::IdentifyOperatorsToDelegate(TfLiteContext* tfLiteConte
delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
{
// node is not supported by ArmNN
+ unsupportedOperators.insert(tfLiteRegistration->builtin_code);
continue;
}
nodesToDelegate->data[nodesToDelegate->size++] = nodeIndex;
}
+ for (std::set<int32_t>::iterator it=unsupportedOperators.begin(); it!=unsupportedOperators.end(); ++it)
+ {
+ TF_LITE_KERNEL_LOG(tfLiteContext,
+ "Operator %s [%d] is not supported by armnn_delegate.",
+ tflite::EnumNameBuiltinOperator(tflite::BuiltinOperator(*it)),
+ *it);
+ }
+
std::sort(&nodesToDelegate->data[0], &nodesToDelegate->data[nodesToDelegate->size]);
return nodesToDelegate;
}
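For illustration (the operator and builtin code shown are hypothetical for any given model), the new loop at the end of IdentifyOperatorsToDelegate adds one summary line per unsupported builtin operator, in addition to the per-layer reasons logged by FORWARD_LAYER_SUPPORT_FUNC, e.g.:

    Operator SQUEEZE [43] is not supported by armnn_delegate.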