aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMike Kelly <mike.kelly@arm.com>2023-08-03 10:42:11 +0100
committerTeresaARM <teresa.charlinreyes@arm.com>2023-08-03 22:04:58 +0000
commita280650509c4fbaa2508cdbc42e31f5a1805bd2a (patch)
tree1b60f4a2e6c7719ae873e8bdef2a76bd82c35033
parent7d1b1ca5b25a3aaeecebc537e8cbed2ee1ed3337 (diff)
downloadarmnn-a280650509c4fbaa2508cdbc42e31f5a1805bd2a.tar.gz
MLCE-1092 Added layerNames to opaque delegate
* All layers added through the opaque delegate will have a name that includes the nodeIndex from the tflite model.
* Added utilities to OpaqueDelegateUtils to get the names for the layers.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Iadcc21646d0b6fcc2c524d6239211ad3af6b6577
-rw-r--r--delegate/opaque/src/Activation.hpp6
-rw-r--r--delegate/opaque/src/ArgMinMax.hpp5
-rw-r--r--delegate/opaque/src/BatchMatMul.hpp5
-rw-r--r--delegate/opaque/src/BatchSpace.hpp10
-rw-r--r--delegate/opaque/src/Comparison.hpp10
-rw-r--r--delegate/opaque/src/Control.hpp14
-rw-r--r--delegate/opaque/src/Convolution.hpp54
-rw-r--r--delegate/opaque/src/ElementwiseBinary.hpp60
-rw-r--r--delegate/opaque/src/ElementwiseUnary.hpp5
-rw-r--r--delegate/opaque/src/Fill.hpp6
-rw-r--r--delegate/opaque/src/FullyConnected.hpp8
-rw-r--r--delegate/opaque/src/Gather.hpp6
-rw-r--r--delegate/opaque/src/GatherNd.hpp6
-rw-r--r--delegate/opaque/src/LogicalBinary.hpp7
-rw-r--r--delegate/opaque/src/Lstm.hpp3
-rw-r--r--delegate/opaque/src/Normalization.hpp10
-rw-r--r--delegate/opaque/src/OpaqueDelegateUtils.hpp56
-rw-r--r--delegate/opaque/src/Pack.hpp6
-rw-r--r--delegate/opaque/src/Pad.hpp3
-rw-r--r--delegate/opaque/src/Pooling.hpp18
-rw-r--r--delegate/opaque/src/Prelu.hpp7
-rw-r--r--delegate/opaque/src/Quantization.hpp11
-rw-r--r--delegate/opaque/src/Redefine.hpp20
-rw-r--r--delegate/opaque/src/Reduce.hpp5
-rw-r--r--delegate/opaque/src/Resize.hpp5
-rw-r--r--delegate/opaque/src/ReverseV2.hpp5
-rw-r--r--delegate/opaque/src/Round.hpp5
-rw-r--r--delegate/opaque/src/Shape.hpp7
-rw-r--r--delegate/opaque/src/Slice.hpp5
-rw-r--r--delegate/opaque/src/Softmax.hpp8
-rw-r--r--delegate/opaque/src/SpaceDepth.hpp10
-rw-r--r--delegate/opaque/src/Split.hpp8
-rw-r--r--delegate/opaque/src/StridedSlice.hpp5
-rw-r--r--delegate/opaque/src/Tile.hpp4
-rw-r--r--delegate/opaque/src/Transpose.hpp5
-rw-r--r--delegate/opaque/src/UnidirectionalSequenceLstm.hpp5
-rw-r--r--delegate/opaque/src/Unpack.hpp7
37 files changed, 278 insertions, 142 deletions
diff --git a/delegate/opaque/src/Activation.hpp b/delegate/opaque/src/Activation.hpp
index f56609001a..dd9c2f68bc 100644
--- a/delegate/opaque/src/Activation.hpp
+++ b/delegate/opaque/src/Activation.hpp
@@ -188,14 +188,16 @@ TfLiteStatus VisitActivationOperator(DelegateData& delegateData,
outputTensorInfo,
activationDesc);
}
- armnn::IConnectableLayer* activationLayer = delegateData.m_Network->AddActivationLayer(activationDesc);
+ auto layerName = GetName(activationDesc.m_Function, nodeIndex);
+ armnn::IConnectableLayer* activationLayer = delegateData.m_Network->AddActivationLayer(activationDesc,
+ layerName.c_str());
ARMNN_ASSERT(activationLayer != nullptr);
armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(activationLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(activationLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/ArgMinMax.hpp b/delegate/opaque/src/ArgMinMax.hpp
index e5499022c6..5ea7aa8655 100644
--- a/delegate/opaque/src/ArgMinMax.hpp
+++ b/delegate/opaque/src/ArgMinMax.hpp
@@ -144,7 +144,8 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
}
// Add an ArgMinMax layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddArgMinMaxLayer(desc);
+ auto layerName = GetName(desc.m_Function, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddArgMinMaxLayer(desc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -152,7 +153,7 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/BatchMatMul.hpp b/delegate/opaque/src/BatchMatMul.hpp
index 5261fbd6c4..257c410d14 100644
--- a/delegate/opaque/src/BatchMatMul.hpp
+++ b/delegate/opaque/src/BatchMatMul.hpp
@@ -102,7 +102,8 @@ TfLiteStatus VisitBatchMatMulOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchMatMulLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::BatchMatMul, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchMatMulLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -110,7 +111,7 @@ TfLiteStatus VisitBatchMatMulOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/BatchSpace.hpp b/delegate/opaque/src/BatchSpace.hpp
index c760a14f5e..00e270558e 100644
--- a/delegate/opaque/src/BatchSpace.hpp
+++ b/delegate/opaque/src/BatchSpace.hpp
@@ -119,7 +119,8 @@ TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
}
// Add a BatchToSpace layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::BatchToSpaceNd, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -127,7 +128,7 @@ TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -244,7 +245,8 @@ TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
}
// Add a SpaceToBatch layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToBatchNdLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::SpaceToBatchNd, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToBatchNdLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -252,7 +254,7 @@ TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/Comparison.hpp b/delegate/opaque/src/Comparison.hpp
index 8740cfb0ea..026a43adaf 100644
--- a/delegate/opaque/src/Comparison.hpp
+++ b/delegate/opaque/src/Comparison.hpp
@@ -10,7 +10,7 @@
namespace armnnOpaqueDelegate
{
-std::string GetLayerName(armnn::ComparisonOperation comparisonOperation)
+std::string GetOperationName(armnn::ComparisonOperation comparisonOperation)
{
std::string layerName = "COMPARISON";
switch (comparisonOperation)
@@ -123,11 +123,13 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
if (!delegateData.m_Network)
{
- validateFunc(outputTensorInfo, isSupported, GetLayerName(comparisonOperation));
+ validateFunc(outputTensorInfo, isSupported, GetOperationName(comparisonOperation));
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* comparisonLayer = delegateData.m_Network->AddComparisonLayer(descriptor);
+ auto layerName = GetName(descriptor.m_Operation, nodeIndex);
+ armnn::IConnectableLayer* comparisonLayer = delegateData.m_Network->AddComparisonLayer(descriptor,
+ layerName.c_str());
comparisonLayer->SetBackendId(setBackend);
ARMNN_ASSERT(comparisonLayer != nullptr);
@@ -135,7 +137,7 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(comparisonLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(comparisonLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/Control.hpp b/delegate/opaque/src/Control.hpp
index bcf3c3380b..9aef8380af 100644
--- a/delegate/opaque/src/Control.hpp
+++ b/delegate/opaque/src/Control.hpp
@@ -140,7 +140,9 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
}
// Setup layer and connect.
- armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor);
+ auto layerName = GetName(armnn::LayerType::Concat, nodeIndex);
+ armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor,
+ layerName.c_str());
concatenationLayer->SetBackendId(setBackend);
ARMNN_ASSERT(concatenationLayer != nullptr);
@@ -148,7 +150,8 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
auto inputsTensorsProcess = ProcessInputs(concatenationLayer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
@@ -168,7 +171,7 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
}
// Check and Create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData, nodeIndex);
}
TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
@@ -272,7 +275,8 @@ TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
}
// Setup layer and connect.
- armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc);
+ auto layerName = GetName(armnn::LayerType::Mean, nodeIndex);
+ armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc, layerName.c_str());
meanLayer->SetBackendId(setBackend);
ARMNN_ASSERT(meanLayer != nullptr);
@@ -280,7 +284,7 @@ TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(meanLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(meanLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/Convolution.hpp b/delegate/opaque/src/Convolution.hpp
index 2eb5edabe0..384c62b678 100644
--- a/delegate/opaque/src/Convolution.hpp
+++ b/delegate/opaque/src/Convolution.hpp
@@ -154,14 +154,16 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
}
// Set up filter and biases
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::Convolution2d, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
if(filterTensorInfo.IsConstant())
{
auto filter = CreateConstTensor(tfLiteFilterTensor, filterTensorInfo);
- armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+ auto filterName = GetName(armnn::LayerType::Constant, nodeIndex, "Filter");
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
}
@@ -171,7 +173,10 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
if (biasTensorInfo.IsConstant())
{
auto biasTensor = CreateConstTensor(tfLiteBiasTensor, biasTensorInfo);
- armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+
+ auto biasName = GetName(armnn::LayerType::Constant, nodeIndex, "Bias");
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
+ biasName.c_str());
ARMNN_ASSERT(biasLayer != nullptr);
biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
@@ -183,7 +188,8 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
{
auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);
- armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ auto inputName = GetName(armnn::LayerType::Constant, nodeIndex, "Input");
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
}
@@ -205,7 +211,7 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
}
// Check and Create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}
TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
@@ -348,7 +354,9 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::DepthwiseConvolution2d, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor,
+ layerName.c_str());
layer->SetBackendId(setBackend);
if(filterTensorInfo.IsConstant())
@@ -356,7 +364,8 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
// For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
auto filter = CreateConstTensor(tfLiteFilterTensor, filterTensorInfo);
- armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+ auto filterName = GetName(armnn::LayerType::Constant, nodeIndex, "Filter");
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
}
@@ -367,7 +376,9 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
{
auto biasTensor = CreateConstTensor(tfLiteBiasTensor, biasTensorInfo);
- armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+ auto biasName = GetName(armnn::LayerType::Constant, nodeIndex, "Bias");
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
+ biasName.c_str());
ARMNN_ASSERT(biasLayer != nullptr);
biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
@@ -379,7 +390,8 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
{
auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);
- armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ auto inputName = GetName(armnn::LayerType::Constant, nodeIndex, "Input");
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
}
@@ -400,7 +412,7 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
return kTfLiteOk;
}
// Check and create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}
TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
@@ -552,7 +564,8 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::Convolution3d, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -563,7 +576,8 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
auto filter = CreateConstTensor(tfLiteFilterTensor,
filterTensorInfo);
- armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+ auto filterName = GetName(armnn::LayerType::Constant, nodeIndex, "Filter");
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
ARMNN_ASSERT(weightsLayer != nullptr);
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
@@ -576,7 +590,9 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
{
auto biasTensor = CreateConstTensor(tfLiteBiasTensor, biasTensorInfo);
- armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+ auto biasName = GetName(armnn::LayerType::Constant, nodeIndex, "Bias");
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
+ biasName.c_str());
ARMNN_ASSERT(biasLayer != nullptr);
biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
@@ -589,7 +605,8 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
{
auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);
- armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ auto inputName = GetName(armnn::LayerType::Constant, nodeIndex, "Input");
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
}
@@ -609,7 +626,7 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
}
// Check and create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}
@@ -781,9 +798,11 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
+ auto layerName = GetName(armnn::LayerType::TransposeConvolution2d, nodeIndex);
armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
filterTensor,
- armnn::EmptyOptional());
+ armnn::EmptyOptional(),
+ layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -792,7 +811,8 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
{
auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);
- armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ auto inputName = GetName(armnn::LayerType::Constant, nodeIndex, "Input");
+ armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
}
diff --git a/delegate/opaque/src/ElementwiseBinary.hpp b/delegate/opaque/src/ElementwiseBinary.hpp
index 8448609695..2a67802028 100644
--- a/delegate/opaque/src/ElementwiseBinary.hpp
+++ b/delegate/opaque/src/ElementwiseBinary.hpp
@@ -244,10 +244,14 @@ TfLiteStatus ValidateSubOperator(DelegateData& delegateData,
std::pair<armnn::IConnectableLayer*, armnn::IConnectableLayer*> AddFloorDivLayer(
DelegateData& delegateData,
- const armnn::TensorInfo& outputTensorInfo)
+ const armnn::TensorInfo& outputTensorInfo,
+ int nodeIndex)
{
+ auto layerName = GetName(armnn::BinaryOperation::Div, nodeIndex);
armnn::IConnectableLayer* divisionLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Div);
+ armnn::BinaryOperation::Div,
+ layerName.c_str());
+
// if the output of the div is Signed32 the Floor layer is not required
if (armnn::DataType::Signed32 == outputTensorInfo.GetDataType())
{
@@ -255,7 +259,8 @@ std::pair<armnn::IConnectableLayer*, armnn::IConnectableLayer*> AddFloorDivLayer
}
armnn::IOutputSlot& outputSlot = divisionLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
- armnn::IConnectableLayer* floorLayer = delegateData.m_Network->AddFloorLayer();
+ auto floorName = GetName(armnn::LayerType::Floor, nodeIndex);
+ armnn::IConnectableLayer* floorLayer = delegateData.m_Network->AddFloorLayer(floorName.c_str());
outputSlot.Connect(floorLayer->GetInputSlot(0));
return std::make_pair(divisionLayer, floorLayer);
}
@@ -411,46 +416,55 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
armnn::IConnectableLayer* elementwiseBinaryLayer = nullptr;
armnnDelegate::MultiLayerFacade multiLayer;
+ std::string layerName;
switch(elementwiseBinaryOperatorCode)
{
case kTfLiteBuiltinAdd:
- elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Add);
+ layerName = GetName(armnn::BinaryOperation::Add, nodeIndex);
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::Add,
+ layerName.c_str());
break;
case kTfLiteBuiltinDiv:
- elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Div);
+ layerName = GetName(armnn::BinaryOperation::Div, nodeIndex);
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::Div,
+ layerName.c_str());
break;
case kTfLiteBuiltinFloorDiv:
{
- auto layers = AddFloorDivLayer(delegateData, outputTensorInfo);
+ auto layers = AddFloorDivLayer(delegateData, outputTensorInfo, nodeIndex);
multiLayer.AssignValues(layers.first, layers.second);
elementwiseBinaryLayer = &multiLayer;
}
break;
case kTfLiteBuiltinMaximum:
- elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Maximum);
+ layerName = GetName(armnn::BinaryOperation::Maximum, nodeIndex);
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::Maximum,
+ layerName.c_str());
break;
case kTfLiteBuiltinMinimum:
- elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Minimum);
+ layerName = GetName(armnn::BinaryOperation::Minimum, nodeIndex);
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::Minimum,
+ layerName.c_str());
break;
case kTfLiteBuiltinMul:
- elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Mul);
+ layerName = GetName(armnn::BinaryOperation::Mul, nodeIndex);
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::Mul,
+ layerName.c_str());
break;
case kTfLiteBuiltinPow:
- elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Power);
+ layerName = GetName(armnn::BinaryOperation::Power, nodeIndex);
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::Power,
+ layerName.c_str());
break;
case kTfLiteBuiltinSquaredDifference:
- elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::SqDiff);
+ layerName = GetName(armnn::BinaryOperation::SqDiff, nodeIndex);
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::SqDiff,
+ layerName.c_str());
break;
case kTfLiteBuiltinSub:
- elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Sub);
+ layerName = GetName(armnn::BinaryOperation::Sub, nodeIndex);
+ elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(armnn::BinaryOperation::Sub,
+ layerName.c_str());
break;
default:
return kTfLiteError;
@@ -462,7 +476,8 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
auto inputsTensorsProcess = ProcessInputs(elementwiseBinaryLayer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
@@ -479,7 +494,8 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
return kTfLiteOk;
}
// Check and Create Activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData,
+ nodeIndex);
}
} // namespace armnnOpaqueDelegate
diff --git a/delegate/opaque/src/ElementwiseUnary.hpp b/delegate/opaque/src/ElementwiseUnary.hpp
index df848469b1..24b851f6e2 100644
--- a/delegate/opaque/src/ElementwiseUnary.hpp
+++ b/delegate/opaque/src/ElementwiseUnary.hpp
@@ -119,7 +119,8 @@ TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor);
+ auto layerName = GetName(descriptor.m_Operation, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -127,7 +128,7 @@ TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/Fill.hpp b/delegate/opaque/src/Fill.hpp
index a8cdf3a56f..fe27255590 100644
--- a/delegate/opaque/src/Fill.hpp
+++ b/delegate/opaque/src/Fill.hpp
@@ -112,7 +112,8 @@ namespace armnnOpaqueDelegate
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddFillLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::Fill, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddFillLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -122,7 +123,8 @@ namespace armnnOpaqueDelegate
auto inputsTensorsProcess = ProcessInputs(layer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
diff --git a/delegate/opaque/src/FullyConnected.hpp b/delegate/opaque/src/FullyConnected.hpp
index 3282cab543..7be06683a5 100644
--- a/delegate/opaque/src/FullyConnected.hpp
+++ b/delegate/opaque/src/FullyConnected.hpp
@@ -186,7 +186,8 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddFullyConnectedLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::FullyConnected, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddFullyConnectedLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -270,7 +271,8 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
layer,
reshapedOutputTensorInfo,
outputTensorInfo,
- delegateData);
+ delegateData,
+ nodeIndex);
if (!layer)
{
TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
@@ -289,7 +291,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
}
// Check and Create Activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}
} // namespace armnnOpaqueDelegate
diff --git a/delegate/opaque/src/Gather.hpp b/delegate/opaque/src/Gather.hpp
index b27016e06a..73bb8a0062 100644
--- a/delegate/opaque/src/Gather.hpp
+++ b/delegate/opaque/src/Gather.hpp
@@ -109,7 +109,8 @@ TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor);
+ auto layerName = GetName(armnn::LayerType::Gather, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -117,7 +118,8 @@ TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
auto inputsTensorsProcess = ProcessInputs(layer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
diff --git a/delegate/opaque/src/GatherNd.hpp b/delegate/opaque/src/GatherNd.hpp
index a767d01ad4..cab68da0e3 100644
--- a/delegate/opaque/src/GatherNd.hpp
+++ b/delegate/opaque/src/GatherNd.hpp
@@ -82,7 +82,8 @@ TfLiteStatus VisitGatherNdOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherNdLayer();
+ auto layerName = GetName(armnn::LayerType::GatherNd, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherNdLayer(layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -90,7 +91,8 @@ TfLiteStatus VisitGatherNdOperator(DelegateData& delegateData,
auto inputsTensorsProcess = ProcessInputs(layer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
diff --git a/delegate/opaque/src/LogicalBinary.hpp b/delegate/opaque/src/LogicalBinary.hpp
index 44a443bb4d..3bac72bf56 100644
--- a/delegate/opaque/src/LogicalBinary.hpp
+++ b/delegate/opaque/src/LogicalBinary.hpp
@@ -119,7 +119,9 @@ TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* logicalBinaryLayer = delegateData.m_Network->AddLogicalBinaryLayer(desc);
+ auto layerName = GetName(desc.m_Operation, nodeIndex);
+ armnn::IConnectableLayer* logicalBinaryLayer = delegateData.m_Network->AddLogicalBinaryLayer(desc,
+ layerName.c_str());
logicalBinaryLayer->SetBackendId(setBackend);
ARMNN_ASSERT(logicalBinaryLayer != nullptr);
@@ -129,7 +131,8 @@ TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
auto inputsTensorsProcess = ProcessInputs(logicalBinaryLayer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
diff --git a/delegate/opaque/src/Lstm.hpp b/delegate/opaque/src/Lstm.hpp
index b896b462d3..439e401315 100644
--- a/delegate/opaque/src/Lstm.hpp
+++ b/delegate/opaque/src/Lstm.hpp
@@ -266,7 +266,8 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddLstmLayer(desc, params);
+ auto layerName = GetName(armnn::LayerType::Lstm, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddLstmLayer(desc, params, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
diff --git a/delegate/opaque/src/Normalization.hpp b/delegate/opaque/src/Normalization.hpp
index c6ac6761d8..181e6e2fcd 100644
--- a/delegate/opaque/src/Normalization.hpp
+++ b/delegate/opaque/src/Normalization.hpp
@@ -82,7 +82,8 @@ TfLiteStatus VisitL2NormalizationOperator(DelegateData& delegateData,
}
// Add a L2Normalization layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddL2NormalizationLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::L2Normalization, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddL2NormalizationLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -90,7 +91,7 @@ TfLiteStatus VisitL2NormalizationOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -183,7 +184,8 @@ TfLiteStatus VisitLocalResponseNormalizationOperator(DelegateData& delegateData,
}
// Add a Normalization layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddNormalizationLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::Normalization, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddNormalizationLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -191,7 +193,7 @@ TfLiteStatus VisitLocalResponseNormalizationOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/OpaqueDelegateUtils.hpp b/delegate/opaque/src/OpaqueDelegateUtils.hpp
index 1c90ee0722..7c9f0c8b08 100644
--- a/delegate/opaque/src/OpaqueDelegateUtils.hpp
+++ b/delegate/opaque/src/OpaqueDelegateUtils.hpp
@@ -10,6 +10,7 @@
#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
+#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
@@ -23,8 +24,44 @@
#include <tensorflow/lite/minimal_logging.h>
#include <tensorflow/lite/kernels/kernel_util.h>
+#include <fmt/format.h>
+
namespace
{
+std::string GetName(armnn::ActivationFunction function, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetActivationFunctionAsCString(function), nodeIndex);
+}
+
+std::string GetName(armnn::ArgMinMaxFunction function, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetArgMinMaxFunctionAsCString(function), nodeIndex);
+}
+
+std::string GetName(armnn::BinaryOperation opType, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetBinaryOperationAsCString(opType), nodeIndex);
+}
+
+std::string GetName(armnn::ComparisonOperation layerType, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetComparisonOperationAsCString(layerType), nodeIndex);
+}
+
+std::string GetName(armnn::LogicalBinaryOperation operation, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetLogicalBinaryOperationAsCString(operation), nodeIndex);
+}
+
+std::string GetName(armnn::UnaryOperation opType, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetUnaryOperationAsCString(opType), nodeIndex);
+}
+
+std::string GetName(armnn::LayerType layerType, int nodeIndex, std::string subname = "")
+{
+ return fmt::format("{}{}:{}", GetLayerTypeAsCString(layerType), subname, nodeIndex);
+}
// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
#define FORWARD_LAYER_OPAQUE_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
@@ -225,7 +262,8 @@ TfLiteStatus FusedActivation(TfLiteOpaqueContext* tfLiteContext,
TfLiteFusedActivation activationType,
armnn::IConnectableLayer* prevLayer,
unsigned int outputSlotIndex,
- armnnOpaqueDelegate::DelegateData& data)
+ armnnOpaqueDelegate::DelegateData& data,
+ int nodeIndex)
{
const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();
@@ -288,7 +326,8 @@ TfLiteStatus FusedActivation(TfLiteOpaqueContext* tfLiteContext,
{
return kTfLiteError;
}
- armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
+ auto layerName = GetName(activationDesc.m_Function, nodeIndex);
+ armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc, layerName.c_str());
activationLayer->SetBackendId(setBackend);
ARMNN_ASSERT(activationLayer != nullptr);
@@ -322,7 +361,8 @@ armnn::IConnectableLayer* AddReshapeLayer(TfLiteOpaqueContext* tfLiteContext,
armnn::IConnectableLayer* prevLayer,
armnn::TensorInfo reshapedOutputTensorInfo,
armnn::TensorInfo outputTensorInfo,
- armnnOpaqueDelegate::DelegateData& data)
+ armnnOpaqueDelegate::DelegateData& data,
+ int nodeIndex)
{
armnn::ReshapeDescriptor desc;
desc.m_TargetShape = outputTensorInfo.GetShape();
@@ -344,7 +384,8 @@ armnn::IConnectableLayer* AddReshapeLayer(TfLiteOpaqueContext* tfLiteContext,
return nullptr;
}
- armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc);
+ auto layerName = GetName(armnn::LayerType::Reshape, nodeIndex);
+ armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc, layerName.c_str());
reshapeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(reshapeLayer != nullptr);
@@ -570,7 +611,8 @@ bool IsOptionalOperandPresent(TfLiteOpaqueNode* tfLiteNode, const int operandInd
TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
armnnOpaqueDelegate::DelegateData& delegateData,
TfLiteOpaqueContext* tfLiteContext,
- TfLiteOpaqueNode* tfLiteNode)
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex)
{
// Get array of input indices, inputIndexArray is set from the TfLiteOpaqueNodeInputs function
// This function turns inputIndexArray into an int array of indices. These indices point to the index of the
@@ -610,7 +652,9 @@ TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
auto constantInput = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);
- armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
+ auto layerName = GetName(armnn::LayerType::Constant, nodeIndex);
+ armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput,
+ layerName.c_str());
constantLayer->SetBackendId(setBackend);
armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(inputTensorInfo);
diff --git a/delegate/opaque/src/Pack.hpp b/delegate/opaque/src/Pack.hpp
index c3ea7da7f7..5a05232e3b 100644
--- a/delegate/opaque/src/Pack.hpp
+++ b/delegate/opaque/src/Pack.hpp
@@ -121,7 +121,8 @@ TfLiteStatus VisitPackOperator(DelegateData& delegateData,
}
// The TfLite Pack operator is equivalent to the ArmNN Stack operator
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddStackLayer(desc);
+ auto layerName = GetName(armnn::LayerType::Stack, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddStackLayer(desc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -129,7 +130,8 @@ TfLiteStatus VisitPackOperator(DelegateData& delegateData,
auto inputsTensorsProcess = ProcessInputs(layer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
diff --git a/delegate/opaque/src/Pad.hpp b/delegate/opaque/src/Pad.hpp
index 112e7bb152..4305224003 100644
--- a/delegate/opaque/src/Pad.hpp
+++ b/delegate/opaque/src/Pad.hpp
@@ -182,7 +182,8 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::Pad, nodeIndex);
+ armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor, layerName.c_str());
padLayer->SetBackendId(setBackend);
ARMNN_ASSERT(padLayer != nullptr);
diff --git a/delegate/opaque/src/Pooling.hpp b/delegate/opaque/src/Pooling.hpp
index 45a10f3833..8e6500c1cb 100644
--- a/delegate/opaque/src/Pooling.hpp
+++ b/delegate/opaque/src/Pooling.hpp
@@ -131,7 +131,8 @@ TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::Pooling2d, nodeIndex);
+ armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor, layerName.c_str());
poolingLayer->SetBackendId(setBackend);
ARMNN_ASSERT(poolingLayer != nullptr);
@@ -139,18 +140,18 @@ TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
- if(Connect(poolingLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
+ if (Connect(poolingLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
{
return kTfLiteError;
}
// Check and create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData, nodeIndex);
}
TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
@@ -344,7 +345,8 @@ TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
}
// Create the Layer
- armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::Pooling3d, nodeIndex);
+ armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor, layerName.c_str());
poolingLayer->SetBackendId(setBackend);
ARMNN_ASSERT(poolingLayer != nullptr);
@@ -353,17 +355,17 @@ TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
- if(Connect(poolingLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
+ if (Connect(poolingLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
{
return kTfLiteError;
}
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData, nodeIndex);
}
} // namespace armnnOpaqueDelegate
diff --git a/delegate/opaque/src/Prelu.hpp b/delegate/opaque/src/Prelu.hpp
index 1a4037eb35..1c9f06d8b8 100644
--- a/delegate/opaque/src/Prelu.hpp
+++ b/delegate/opaque/src/Prelu.hpp
@@ -98,7 +98,8 @@ TfLiteStatus VisitPreluOperator(DelegateData& delegateData,
outputTensorInfo);
}
- armnn::IConnectableLayer* preluLayer = delegateData.m_Network->AddPreluLayer();
+ auto layerName = GetName(armnn::LayerType::Prelu, nodeIndex);
+ armnn::IConnectableLayer* preluLayer = delegateData.m_Network->AddPreluLayer(layerName.c_str());
ARMNN_ASSERT(preluLayer != nullptr);
bool isConstantAlpha = IsConstantTensor(tfLiteAlphaTensor);
@@ -108,7 +109,9 @@ TfLiteStatus VisitPreluOperator(DelegateData& delegateData,
{
auto constAlphaTensor = armnn::ConstTensor(alphaTensorInfo, TfLiteOpaqueTensorData(tfLiteAlphaTensor));
- armnn::IConnectableLayer* constLayer = delegateData.m_Network->AddConstantLayer(constAlphaTensor);
+ auto alphaName = GetName(armnn::LayerType::Constant, nodeIndex, "Alpha");
+ armnn::IConnectableLayer* constLayer = delegateData.m_Network->AddConstantLayer(constAlphaTensor,
+ alphaName.c_str());
ARMNN_ASSERT(constLayer != nullptr);
constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
diff --git a/delegate/opaque/src/Quantization.hpp b/delegate/opaque/src/Quantization.hpp
index 7a1dd6fd17..d7f5c5c73f 100644
--- a/delegate/opaque/src/Quantization.hpp
+++ b/delegate/opaque/src/Quantization.hpp
@@ -79,7 +79,8 @@ TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer();
+ auto layerName = GetName(armnn::LayerType::Dequantize, nodeIndex);
+ armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer(layerName.c_str());
dequantizeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(dequantizeLayer != nullptr);
@@ -89,7 +90,8 @@ TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
auto inputsTensorsProcess = ProcessInputs(dequantizeLayer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
@@ -176,7 +178,8 @@ TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer();
+ auto layerName = GetName(armnn::LayerType::Quantize, nodeIndex);
+ armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer(layerName.c_str());
quantizeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(quantizeLayer != nullptr);
@@ -184,7 +187,7 @@ TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(quantizeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(quantizeLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/Redefine.hpp b/delegate/opaque/src/Redefine.hpp
index ce90af0812..5ce7a3dcc1 100644
--- a/delegate/opaque/src/Redefine.hpp
+++ b/delegate/opaque/src/Redefine.hpp
@@ -73,7 +73,8 @@ TfLiteStatus VisitCastOperator(DelegateData& delegateData,
}
// Add a Cast layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer();
+ auto layerName = GetName(armnn::LayerType::Cast, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer(layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -81,7 +82,7 @@ TfLiteStatus VisitCastOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -242,7 +243,8 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+ auto layerName = GetName(armnn::LayerType::Reshape, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -250,7 +252,7 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -350,7 +352,8 @@ TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+ auto layerName = GetName(armnn::LayerType::Reshape, nodeIndex, "Squeeze");
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -358,7 +361,7 @@ TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -478,7 +481,8 @@ TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+ auto layerName = GetName(armnn::LayerType::Reshape, nodeIndex, "ExpandDims");
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -487,7 +491,7 @@ TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/Reduce.hpp b/delegate/opaque/src/Reduce.hpp
index afea7aafb0..a7948ae98d 100644
--- a/delegate/opaque/src/Reduce.hpp
+++ b/delegate/opaque/src/Reduce.hpp
@@ -147,7 +147,8 @@ TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
}
// Add an Reduce layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddReduceLayer(desc);
+ auto layerName = GetName(armnn::LayerType::Reduce, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddReduceLayer(desc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -155,7 +156,7 @@ TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/Resize.hpp b/delegate/opaque/src/Resize.hpp
index 509ae62524..948b600625 100644
--- a/delegate/opaque/src/Resize.hpp
+++ b/delegate/opaque/src/Resize.hpp
@@ -203,13 +203,16 @@ TfLiteStatus VisitResizeOperator(DelegateData& delegateData,
armnn::IConnectableLayer* resizeLayer = nullptr;
+ layerName += ":";
+    layerName += std::to_string(nodeIndex);
+
resizeLayer = delegateData.m_Network->AddResizeLayer(desc, layerName.c_str());
armnn::IOutputSlot& outputSlot = resizeLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(resizeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(resizeLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/ReverseV2.hpp b/delegate/opaque/src/ReverseV2.hpp
index e5714f4576..5291aac418 100644
--- a/delegate/opaque/src/ReverseV2.hpp
+++ b/delegate/opaque/src/ReverseV2.hpp
@@ -127,8 +127,6 @@ TfLiteStatus VisitReverseV2Operator(DelegateData& delegateData,
}
}
- std::string layerName("ReverseV2");
-
// Get axis tensor data
auto axisTensorNumValues = static_cast<unsigned int>(TfLiteOpaqueTensorDim(tfLiteAxisTensor,0));
@@ -155,13 +153,14 @@ TfLiteStatus VisitReverseV2Operator(DelegateData& delegateData,
outputTensorInfo);
}
+ auto layerName = GetName(armnn::LayerType::ReverseV2, nodeIndex);
armnn::IConnectableLayer* reverseV2Layer = delegateData.m_Network->AddReverseV2Layer(layerName.c_str());
armnn::IOutputSlot& outputSlot = reverseV2Layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(reverseV2Layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(reverseV2Layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/Round.hpp b/delegate/opaque/src/Round.hpp
index c64c210301..4064b6361c 100644
--- a/delegate/opaque/src/Round.hpp
+++ b/delegate/opaque/src/Round.hpp
@@ -72,14 +72,15 @@ TfLiteStatus VisitFloorOperator(DelegateData& delegateData,
}
// Add a Floor layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddFloorLayer();
+ auto layerName = GetName(armnn::LayerType::Floor, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddFloorLayer(layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/Shape.hpp b/delegate/opaque/src/Shape.hpp
index 4c37c3801d..9f15a4f739 100644
--- a/delegate/opaque/src/Shape.hpp
+++ b/delegate/opaque/src/Shape.hpp
@@ -59,7 +59,7 @@ TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
auto* shapeParameters = reinterpret_cast<TfLiteShapeParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
- if ( shapeParameters->out_type != kTfLiteInt32 && shapeParameters->out_type != kTfLiteInt64 )
+ if (shapeParameters->out_type != kTfLiteInt32 && shapeParameters->out_type != kTfLiteInt64)
{
TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
tfLiteContext,
@@ -92,7 +92,8 @@ TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
}
// Add a Shape layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddShapeLayer();
+ auto layerName = GetName(armnn::LayerType::Shape, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddShapeLayer(layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -100,7 +101,7 @@ TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/Slice.hpp b/delegate/opaque/src/Slice.hpp
index e39e4afcec..7876b7b398 100644
--- a/delegate/opaque/src/Slice.hpp
+++ b/delegate/opaque/src/Slice.hpp
@@ -6,7 +6,6 @@
#pragma once
#include <OpaqueDelegateUtils.hpp>
-#include <fmt/format.h>
namespace armnnOpaqueDelegate
{
@@ -169,9 +168,9 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
validateFunc(outputTensorInfo, isSupported);
return isSupported ? kTfLiteOk : kTfLiteError;
}
- auto layerName = fmt::format("Slice:{}", nodeIndex);
// Add a Slice layer
+ auto layerName = GetName(armnn::LayerType::Slice, nodeIndex);
armnn::IConnectableLayer* layer = delegateData.m_Network->AddSliceLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -180,7 +179,7 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/Softmax.hpp b/delegate/opaque/src/Softmax.hpp
index 87927616ff..31fe1c945e 100644
--- a/delegate/opaque/src/Softmax.hpp
+++ b/delegate/opaque/src/Softmax.hpp
@@ -125,6 +125,8 @@ TfLiteStatus VisitSoftmaxOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* softmaxLayer = nullptr;
+ auto layerName = GetName(armnn::LayerType::Softmax, nodeIndex);
+
switch(tfliteSoftmaxOperatorCode)
{
case kTfLiteBuiltinSoftmax:
@@ -132,13 +134,13 @@ TfLiteStatus VisitSoftmaxOperator(DelegateData& delegateData,
armnn::SoftmaxDescriptor descriptor;
auto* nodeParameters = reinterpret_cast<TfLiteSoftmaxParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
descriptor.m_Beta = nodeParameters->beta;
- softmaxLayer = delegateData.m_Network->AddSoftmaxLayer(descriptor);
+ softmaxLayer = delegateData.m_Network->AddSoftmaxLayer(descriptor, layerName.c_str());
break;
}
case kTfLiteBuiltinLogSoftmax:
{
armnn::LogSoftmaxDescriptor descriptor;
- softmaxLayer = delegateData.m_Network->AddLogSoftmaxLayer(descriptor);
+ softmaxLayer = delegateData.m_Network->AddLogSoftmaxLayer(descriptor, layerName.c_str());
break;
}
default:
@@ -150,7 +152,7 @@ TfLiteStatus VisitSoftmaxOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(softmaxLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(softmaxLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/SpaceDepth.hpp b/delegate/opaque/src/SpaceDepth.hpp
index 9cc61eb603..a1c5544a4e 100644
--- a/delegate/opaque/src/SpaceDepth.hpp
+++ b/delegate/opaque/src/SpaceDepth.hpp
@@ -83,12 +83,13 @@ TfLiteStatus VisitSpaceToDepthOperator(DelegateData& delegateData,
}
// Add a SpaceToDepth layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToDepthLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::SpaceToDepth, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToDepthLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -173,7 +174,8 @@ TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
}
// Add a DepthToSpace layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthToSpaceLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::DepthToSpace, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -181,7 +183,7 @@ TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/Split.hpp b/delegate/opaque/src/Split.hpp
index d3d00e4d63..aec0fb674a 100644
--- a/delegate/opaque/src/Split.hpp
+++ b/delegate/opaque/src/Split.hpp
@@ -157,7 +157,8 @@ TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
+ auto layerName = GetName(armnn::LayerType::Splitter, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -391,7 +392,8 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
+ auto layerName = GetName(armnn::LayerType::Splitter, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -401,7 +403,7 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
}
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/StridedSlice.hpp b/delegate/opaque/src/StridedSlice.hpp
index 9ac3342fce..2e17e3292f 100644
--- a/delegate/opaque/src/StridedSlice.hpp
+++ b/delegate/opaque/src/StridedSlice.hpp
@@ -153,7 +153,8 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
}
// Add a StridedSlice layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddStridedSliceLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::StridedSlice, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddStridedSliceLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -161,7 +162,7 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/Tile.hpp b/delegate/opaque/src/Tile.hpp
index 17cbdee7eb..0ad65ca54c 100644
--- a/delegate/opaque/src/Tile.hpp
+++ b/delegate/opaque/src/Tile.hpp
@@ -167,7 +167,7 @@ TfLiteStatus VisitTileOperator(DelegateData& delegateData,
tileDescriptor);
}
- std::string layerName("Tile");
+ auto layerName = GetName(armnn::LayerType::Tile, nodeIndex);
armnn::IConnectableLayer* layer = delegateData.m_Network->AddTileLayer(tileDescriptor, layerName.c_str());
if (layer == nullptr)
@@ -177,7 +177,7 @@ TfLiteStatus VisitTileOperator(DelegateData& delegateData,
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
- if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/Transpose.hpp b/delegate/opaque/src/Transpose.hpp
index 2627c42f1f..5af03b3790 100644
--- a/delegate/opaque/src/Transpose.hpp
+++ b/delegate/opaque/src/Transpose.hpp
@@ -94,7 +94,8 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor);
+ auto layerName = GetName(armnn::LayerType::Transpose, nodeIndex);
+ armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor, layerName.c_str());
transposeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(transposeLayer != nullptr);
// Permutation vector given to descriptor object
@@ -104,7 +105,7 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(transposeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(transposeLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/opaque/src/UnidirectionalSequenceLstm.hpp b/delegate/opaque/src/UnidirectionalSequenceLstm.hpp
index 790f287e22..2fd64c0dd0 100644
--- a/delegate/opaque/src/UnidirectionalSequenceLstm.hpp
+++ b/delegate/opaque/src/UnidirectionalSequenceLstm.hpp
@@ -320,7 +320,10 @@ TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
+ auto layerName = GetName(armnn::LayerType::UnidirectionalSequenceLstm, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddUnidirectionalSequenceLstmLayer(desc,
+ params,
+ layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
diff --git a/delegate/opaque/src/Unpack.hpp b/delegate/opaque/src/Unpack.hpp
index 9b87bf7995..0956d1688e 100644
--- a/delegate/opaque/src/Unpack.hpp
+++ b/delegate/opaque/src/Unpack.hpp
@@ -187,10 +187,9 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
};
- std::string splitterLayerName("Unpack Splitter");
-
+ auto layerName = GetName(armnn::LayerType::Splitter, nodeIndex, "Unpack");
armnn::IConnectableLayer* splitterLayer = delegateData.m_Network->AddSplitterLayer(splitDesc,
- splitterLayerName.c_str());
+ layerName.c_str());
splitterLayer->SetBackendId(setBackendSplit);
ARMNN_ASSERT(splitterLayer != nullptr);
@@ -206,7 +205,7 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
// Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
for (unsigned int outputIndex = 0; outputIndex < splitterLayer->GetNumOutputSlots(); ++outputIndex)
{
- std::string reshapeLayerName("Unpack Reshape");
+ auto reshapeLayerName = GetName(armnn::LayerType::Reshape, nodeIndex, "Unpack");
armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor,
reshapeLayerName.c_str());
reshapeLayer->SetBackendId(setBackendReshape);