author     Mike Kelly <mike.kelly@arm.com>          2023-08-02 13:23:09 +0100
committer  Nikhil Raj Arm <nikhil.raj@arm.com>      2023-08-03 20:29:40 +0000
commit     07169c8cc8b73ba5f3550cb0e8688de73d438c7e (patch)
tree       5c9447a52ac676268756a3421a0a8e933321e60a
parent     b179382bcb4944d0137aa9799c3c56a2102ecda2 (diff)
download   armnn-07169c8cc8b73ba5f3550cb0e8688de73d438c7e.tar.gz
MLCE-1092 Added layerNames to classic delegate
* All layers added through the classic delegate will have a name that includes the nodeIndex from the tflite model.
* Added utilities to ClassicDelegateUtils to get the names for the layers.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Iac567486d1f91c0a99b77ed8963f6b6ca26b0b59
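For illustration, a minimal standalone sketch of the naming scheme (hypothetical demo, not part of the patch; MakeLayerName stands in for the GetLayerName overloads added to ClassicDelegateUtils.hpp below):

#include <fmt/format.h>
#include <iostream>
#include <string>

// Mirrors the format used by the new GetLayerName helpers:
// "{type}{optional sub-component tag}:{tflite node index}".
std::string MakeLayerName(const char* type, int nodeIndex, const std::string& tag = "")
{
    return fmt::format("{}{}:{}", type, tag, nodeIndex);
}

int main()
{
    std::cout << MakeLayerName("Convolution2d", 7) << '\n';      // Convolution2d:7
    std::cout << MakeLayerName("Constant", 7, "Filter") << '\n'; // ConstantFilter:7
    std::cout << MakeLayerName("Add", 12) << '\n';               // Add:12
}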
-rw-r--r--  delegate/classic/src/Activation.hpp  6
-rw-r--r--  delegate/classic/src/ArgMinMax.hpp  5
-rw-r--r--  delegate/classic/src/BatchMatMul.hpp  5
-rw-r--r--  delegate/classic/src/BatchSpace.hpp  7
-rw-r--r--  delegate/classic/src/ClassicDelegateUtils.hpp  58
-rw-r--r--  delegate/classic/src/Comparison.hpp  6
-rw-r--r--  delegate/classic/src/Control.hpp  14
-rw-r--r--  delegate/classic/src/Convolution.hpp  58
-rw-r--r--  delegate/classic/src/ElementwiseBinary.hpp  44
-rw-r--r--  delegate/classic/src/ElementwiseUnary.hpp  5
-rw-r--r--  delegate/classic/src/Fill.hpp  6
-rw-r--r--  delegate/classic/src/FullyConnected.hpp  24
-rw-r--r--  delegate/classic/src/Gather.hpp  6
-rw-r--r--  delegate/classic/src/GatherNd.hpp  6
-rw-r--r--  delegate/classic/src/LogicalBinary.hpp  7
-rw-r--r--  delegate/classic/src/Lstm.hpp  3
-rw-r--r--  delegate/classic/src/Normalization.hpp  10
-rw-r--r--  delegate/classic/src/Pack.hpp  6
-rw-r--r--  delegate/classic/src/Pad.hpp  3
-rw-r--r--  delegate/classic/src/Pooling.hpp  16
-rw-r--r--  delegate/classic/src/Prelu.hpp  7
-rw-r--r--  delegate/classic/src/Quantization.hpp  11
-rw-r--r--  delegate/classic/src/Redefine.hpp  20
-rw-r--r--  delegate/classic/src/Reduce.hpp  7
-rw-r--r--  delegate/classic/src/Resize.hpp  6
-rw-r--r--  delegate/classic/src/ReverseV2.hpp  5
-rw-r--r--  delegate/classic/src/Round.hpp  5
-rw-r--r--  delegate/classic/src/Shape.hpp  7
-rw-r--r--  delegate/classic/src/Slice.hpp  4
-rw-r--r--  delegate/classic/src/Softmax.hpp  7
-rw-r--r--  delegate/classic/src/SpaceDepth.hpp  10
-rw-r--r--  delegate/classic/src/Split.hpp  5
-rw-r--r--  delegate/classic/src/StridedSlice.hpp  5
-rw-r--r--  delegate/classic/src/Tile.hpp  4
-rw-r--r--  delegate/classic/src/Transpose.hpp  5
-rw-r--r--  delegate/classic/src/UnidirectionalSequenceLstm.hpp  5
-rw-r--r--  delegate/classic/src/Unpack.hpp  10
37 files changed, 277 insertions, 141 deletions
diff --git a/delegate/classic/src/Activation.hpp b/delegate/classic/src/Activation.hpp
index 442ce4fac3..a93cee43a3 100644
--- a/delegate/classic/src/Activation.hpp
+++ b/delegate/classic/src/Activation.hpp
@@ -122,14 +122,16 @@ TfLiteStatus VisitActivationOperator(DelegateData& delegateData,
outputTensorInfo,
activationDesc);
}
- armnn::IConnectableLayer* activationLayer = delegateData.m_Network->AddActivationLayer(activationDesc);
+ auto layerName = GetLayerName(activationDesc.m_Function, nodeIndex);
+ armnn::IConnectableLayer* activationLayer = delegateData.m_Network->AddActivationLayer(activationDesc,
+ layerName.c_str());
ARMNN_ASSERT(activationLayer != nullptr);
armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(activationLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(activationLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/ArgMinMax.hpp b/delegate/classic/src/ArgMinMax.hpp
index 4e4a2a3f3a..3729b3bd83 100644
--- a/delegate/classic/src/ArgMinMax.hpp
+++ b/delegate/classic/src/ArgMinMax.hpp
@@ -112,7 +112,8 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
}
// Add an ArgMinMax layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddArgMinMaxLayer(desc);
+ auto layerName = GetLayerName(desc.m_Function, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddArgMinMaxLayer(desc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -120,7 +121,7 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/BatchMatMul.hpp b/delegate/classic/src/BatchMatMul.hpp
index 94b25fe7b5..1caa354d4d 100644
--- a/delegate/classic/src/BatchMatMul.hpp
+++ b/delegate/classic/src/BatchMatMul.hpp
@@ -90,7 +90,8 @@ namespace armnnDelegate
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchMatMulLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::BatchMatMul, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchMatMulLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -98,7 +99,7 @@ namespace armnnDelegate
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/BatchSpace.hpp b/delegate/classic/src/BatchSpace.hpp
index 30c6dbfc15..07491cee0d 100644
--- a/delegate/classic/src/BatchSpace.hpp
+++ b/delegate/classic/src/BatchSpace.hpp
@@ -96,7 +96,8 @@ TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
}
// Add a BatchToSpace layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::BatchToSpaceNd, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -104,7 +105,7 @@ TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -204,7 +205,7 @@ TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/ClassicDelegateUtils.hpp b/delegate/classic/src/ClassicDelegateUtils.hpp
index 2806716334..8a9409df6a 100644
--- a/delegate/classic/src/ClassicDelegateUtils.hpp
+++ b/delegate/classic/src/ClassicDelegateUtils.hpp
@@ -10,6 +10,7 @@
#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
+#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
@@ -22,6 +23,8 @@
#include <tensorflow/lite/minimal_logging.h>
#include <tensorflow/lite/kernels/kernel_util.h>
+#include <fmt/format.h>
+
namespace
{
@@ -71,6 +74,41 @@ catch (const armnn::InvalidArgumentException &e) \
throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
+std::string GetLayerName(armnn::ActivationFunction function, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetActivationFunctionAsCString(function), nodeIndex);
+}
+
+std::string GetLayerName(armnn::ArgMinMaxFunction function, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetArgMinMaxFunctionAsCString(function), nodeIndex);
+}
+
+std::string GetLayerName(armnn::BinaryOperation opType, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetBinaryOperationAsCString(opType), nodeIndex);
+}
+
+std::string GetLayerName(armnn::ComparisonOperation layerType, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetComparisonOperationAsCString(layerType), nodeIndex);
+}
+
+std::string GetLayerName(armnn::LogicalBinaryOperation operation, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetLogicalBinaryOperationAsCString(operation), nodeIndex);
+}
+
+std::string GetLayerName(armnn::UnaryOperation opType, int nodeIndex)
+{
+ return fmt::format("{}:{}", GetUnaryOperationAsCString(opType), nodeIndex);
+}
+
+std::string GetLayerName(armnn::LayerType layerType, int nodeIndex, std::string name = "")
+{
+ return fmt::format("{}{}:{}", GetLayerTypeAsCString(layerType), name, nodeIndex);
+}
+
TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
TfLiteNode* tfLiteNode,
const unsigned int expectedSize,
@@ -181,7 +219,8 @@ TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
TfLiteFusedActivation activationType,
armnn::IConnectableLayer* prevLayer,
unsigned int outputSlotIndex,
- armnnDelegate::DelegateData& data)
+ armnnDelegate::DelegateData& data,
+ int nodeIndex)
{
const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();
@@ -250,7 +289,8 @@ TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
{
return kTfLiteError;
}
- armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
+ auto layerName = GetLayerName(activationDesc.m_Function, nodeIndex);
+ armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc, layerName.c_str());
activationLayer->SetBackendId(setBackend);
ARMNN_ASSERT(activationLayer != nullptr);
@@ -273,7 +313,8 @@ armnn::IConnectableLayer* AddReshapeLayer(TfLiteContext* tfLiteContext,
armnn::IConnectableLayer* prevLayer,
armnn::TensorInfo reshapedOutputTensorInfo,
armnn::TensorInfo outputTensorInfo,
- armnnDelegate::DelegateData& data)
+ armnnDelegate::DelegateData& data,
+ int nodeIndex)
{
armnn::ReshapeDescriptor desc;
desc.m_TargetShape = outputTensorInfo.GetShape();
@@ -295,7 +336,8 @@ armnn::IConnectableLayer* AddReshapeLayer(TfLiteContext* tfLiteContext,
return nullptr;
}
- armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc);
+ auto layerName = GetLayerName(armnn::LayerType::Reshape, nodeIndex);
+ armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc, layerName.c_str());
reshapeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(reshapeLayer != nullptr);
@@ -478,7 +520,8 @@ bool IsOptionalOperandPresent(TfLiteNode* tfLiteNode, const int operandIndex)
TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
armnnDelegate::DelegateData& delegateData,
TfLiteContext* tfLiteContext,
- TfLiteNode* tfLiteNode)
+ TfLiteNode* tfLiteNode,
+ int nodeIndex)
{
const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
// Process input tensors
@@ -504,7 +547,10 @@ TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
}
auto constantInput = CreateConstTensor(&tfLiteInputTensor,
inputTensorInfo);
- armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
+
+ auto layerName = GetLayerName(armnn::LayerType::Constant, nodeIndex);
+ armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput,
+ layerName.c_str());
constantLayer->SetBackendId(setBackend);
armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(inputTensorInfo);
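Usage sketch for the helpers above (illustrative node index; assumes the overloads are in scope). The overload is selected by the enum type of the first argument, so call sites read uniformly across operator kinds:

auto reluName    = GetLayerName(armnn::ActivationFunction::ReLu, 3);       // "ReLu:3"
auto sqrtName    = GetLayerName(armnn::UnaryOperation::Sqrt, 3);           // "Sqrt:3"
auto squeezeName = GetLayerName(armnn::LayerType::Reshape, 3, "Squeeze");  // "ReshapeSqueeze:3"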
diff --git a/delegate/classic/src/Comparison.hpp b/delegate/classic/src/Comparison.hpp
index 1db554cfbf..ead08d1724 100644
--- a/delegate/classic/src/Comparison.hpp
+++ b/delegate/classic/src/Comparison.hpp
@@ -117,7 +117,9 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* comparisonLayer = delegateData.m_Network->AddComparisonLayer(descriptor);
+ auto layerName = GetLayerName(descriptor.m_Operation, nodeIndex);
+ armnn::IConnectableLayer* comparisonLayer = delegateData.m_Network->AddComparisonLayer(descriptor,
+ layerName.c_str());
comparisonLayer->SetBackendId(setBackend);
ARMNN_ASSERT(comparisonLayer != nullptr);
@@ -125,7 +127,7 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(comparisonLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(comparisonLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Control.hpp b/delegate/classic/src/Control.hpp
index e6779f360a..0adf262c23 100644
--- a/delegate/classic/src/Control.hpp
+++ b/delegate/classic/src/Control.hpp
@@ -127,7 +127,9 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
}
// Setup layer and connect.
- armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Concat, nodeIndex);
+ armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor,
+ layerName.c_str());
concatenationLayer->SetBackendId(setBackend);
ARMNN_ASSERT(concatenationLayer != nullptr);
@@ -135,7 +137,8 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
auto inputsTensorsProcess = ProcessInputs(concatenationLayer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
@@ -155,7 +158,7 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
}
// Check and Create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData, nodeIndex);
}
TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
@@ -271,7 +274,8 @@ TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
}
// Setup layer and connect.
- armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc);
+ auto layerName = GetLayerName(armnn::LayerType::Mean, nodeIndex);
+ armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc, layerName.c_str());
meanLayer->SetBackendId(setBackend);
ARMNN_ASSERT(meanLayer != nullptr);
@@ -279,7 +283,7 @@ TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(meanLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(meanLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Convolution.hpp b/delegate/classic/src/Convolution.hpp
index a44f9eef1d..cf0134ec1f 100644
--- a/delegate/classic/src/Convolution.hpp
+++ b/delegate/classic/src/Convolution.hpp
@@ -131,14 +131,16 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
}
// Set up filter and biases
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Convolution2d, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
- if(filterTensorInfo.IsConstant())
+ if (filterTensorInfo.IsConstant())
{
auto filter = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]], filterTensorInfo);
- armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+ auto filterName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Filter");
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
}
@@ -149,7 +151,10 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
if(biasTensorInfo.IsConstant())
{
auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
- armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+
+ auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
+ biasName.c_str());
ARMNN_ASSERT(biasLayer != nullptr);
biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
@@ -157,11 +162,12 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
}
// The data input can also be constant, so we must check that this is also allocated to an input slot
- if(inputTensorInfo.IsConstant())
+ if (inputTensorInfo.IsConstant())
{
auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]], inputTensorInfo);
- armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
}
@@ -183,7 +189,7 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
}
// Check and Create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}
// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
@@ -311,7 +317,8 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Convolution3d, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -321,7 +328,8 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
{
auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
- armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+ auto filterName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Filter");
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
ARMNN_ASSERT(weightsLayer != nullptr);
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
@@ -335,7 +343,8 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
{
auto biases = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
- armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases);
+ auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases, biasName.c_str());
ARMNN_ASSERT(biasLayer != nullptr);
biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
@@ -348,7 +357,8 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
{
auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]], inputTensorInfo);
- armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
}
@@ -368,7 +378,7 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
}
// Check and create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}
#endif
@@ -485,7 +495,9 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::DepthwiseConvolution2d, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor,
+ layerName.c_str());
layer->SetBackendId(setBackend);
if(filterTensorInfo.IsConstant())
@@ -493,7 +505,8 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
// For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
- armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
+ auto filterName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Filter");
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
}
@@ -504,7 +517,10 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
if(biasTensorInfo.IsConstant())
{
auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
- armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+
+ auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
+ biasName.c_str());
ARMNN_ASSERT(biasLayer != nullptr);
biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
@@ -516,7 +532,8 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
{
auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]], inputTensorInfo);
- armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
}
@@ -537,7 +554,7 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
return kTfLiteOk;
}
// Check and create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}
TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
@@ -683,9 +700,11 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
+ auto layerName = GetLayerName(armnn::LayerType::TransposeConvolution2d, nodeIndex);
armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
filterTensor,
- armnn::EmptyOptional());
+ armnn::EmptyOptional(),
+ layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -694,7 +713,8 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
{
auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[2]], inputTensorInfo);
- armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
+ armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
}
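For reference, the constant inputs split out by the convolution visitors get per-component names; a sketch assuming node index 42:

auto filterName = GetLayerName(armnn::LayerType::Constant, 42, "Filter");  // "ConstantFilter:42"
auto biasName   = GetLayerName(armnn::LayerType::Constant, 42, "Bias");    // "ConstantBias:42"
auto inputName  = GetLayerName(armnn::LayerType::Constant, 42, "Input");   // "ConstantInput:42"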
diff --git a/delegate/classic/src/ElementwiseBinary.hpp b/delegate/classic/src/ElementwiseBinary.hpp
index 8055a6958c..8309a79d38 100644
--- a/delegate/classic/src/ElementwiseBinary.hpp
+++ b/delegate/classic/src/ElementwiseBinary.hpp
@@ -250,11 +250,13 @@ TfLiteStatus ValidateSubOperator(DelegateData& delegateData,
}
std::pair<armnn::IConnectableLayer*, armnn::IConnectableLayer*> AddFloorDivLayer(
- DelegateData& delegateData,
- const armnn::TensorInfo& outputTensorInfo)
+ DelegateData& delegateData,
+ const armnn::TensorInfo& outputTensorInfo,
+ int nodeIndex)
{
+ auto divName = GetLayerName(armnn::BinaryOperation::Div, nodeIndex);
armnn::IConnectableLayer* divisionLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Div);
+ armnn::BinaryOperation::Div, divName.c_str());
// if the output of the div is Signed32 the Floor layer is not required
if (armnn::DataType::Signed32 == outputTensorInfo.GetDataType())
{
@@ -262,7 +264,8 @@ std::pair<armnn::IConnectableLayer*, armnn::IConnectableLayer*> AddFloorDivLayer
}
armnn::IOutputSlot& outputSlot = divisionLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
- armnn::IConnectableLayer* floorLayer = delegateData.m_Network->AddFloorLayer();
+ auto floorName = GetLayerName(armnn::LayerType::Floor, nodeIndex);
+ armnn::IConnectableLayer* floorLayer = delegateData.m_Network->AddFloorLayer(floorName.c_str());
outputSlot.Connect(floorLayer->GetInputSlot(0));
return std::make_pair(divisionLayer, floorLayer);
}
@@ -397,46 +400,55 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
armnn::IConnectableLayer* elementwiseBinaryLayer = nullptr;
MultiLayerFacade multiLayer;
+ std::string layerName;
switch(elementwiseBinaryOperatorCode)
{
case kTfLiteBuiltinAdd:
+ layerName = GetLayerName(armnn::BinaryOperation::Add, nodeIndex);
elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Add);
+ armnn::BinaryOperation::Add, layerName.c_str());
break;
case kTfLiteBuiltinDiv:
+ layerName = GetLayerName(armnn::BinaryOperation::Div, nodeIndex);
elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Div);
+ armnn::BinaryOperation::Div, layerName.c_str());
break;
case kTfLiteBuiltinFloorDiv:
{
- auto layers = AddFloorDivLayer(delegateData, outputTensorInfo);
+ auto layers = AddFloorDivLayer(delegateData, outputTensorInfo, nodeIndex);
multiLayer.AssignValues(layers.first, layers.second);
elementwiseBinaryLayer = &multiLayer;
}
break;
case kTfLiteBuiltinMaximum:
+ layerName = GetLayerName(armnn::BinaryOperation::Maximum, nodeIndex);
elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Maximum);
+ armnn::BinaryOperation::Maximum, layerName.c_str());
break;
case kTfLiteBuiltinMinimum:
+ layerName = GetLayerName(armnn::BinaryOperation::Minimum, nodeIndex);
elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Minimum);
+ armnn::BinaryOperation::Minimum, layerName.c_str());
break;
case kTfLiteBuiltinMul:
+ layerName = GetLayerName(armnn::BinaryOperation::Mul, nodeIndex);
elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Mul);
+ armnn::BinaryOperation::Mul, layerName.c_str());
break;
case kTfLiteBuiltinPow:
+ layerName = GetLayerName(armnn::BinaryOperation::Power, nodeIndex);
elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Power);
+ armnn::BinaryOperation::Power, layerName.c_str());
break;
case kTfLiteBuiltinSquaredDifference:
+ layerName = GetLayerName(armnn::BinaryOperation::SqDiff, nodeIndex);
elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::SqDiff);
+ armnn::BinaryOperation::SqDiff, layerName.c_str());
break;
case kTfLiteBuiltinSub:
+ layerName = GetLayerName(armnn::BinaryOperation::Sub, nodeIndex);
elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
- armnn::BinaryOperation::Sub);
+ armnn::BinaryOperation::Sub, layerName.c_str());
break;
default:
return kTfLiteError;
@@ -448,7 +460,8 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
auto inputsTensorsProcess = ProcessInputs(elementwiseBinaryLayer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
@@ -465,7 +478,8 @@ TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
return kTfLiteOk;
}
// Check and Create Activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData,
+ nodeIndex);
}
} // namespace armnnDelegate
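Note that kTfLiteBuiltinFloorDiv expands to two named layers; a sketch of the result for node 5 with a floating-point output, per AddFloorDivLayer above:

auto layers = AddFloorDivLayer(delegateData, outputTensorInfo, 5);
// layers.first  -> ElementwiseBinary layer named "Div:5"
// layers.second -> Floor layer named "Floor:5"
// (for Signed32 outputs the Floor layer is not required)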
diff --git a/delegate/classic/src/ElementwiseUnary.hpp b/delegate/classic/src/ElementwiseUnary.hpp
index 562ce1fd9f..4a898e6499 100644
--- a/delegate/classic/src/ElementwiseUnary.hpp
+++ b/delegate/classic/src/ElementwiseUnary.hpp
@@ -71,7 +71,8 @@ TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor);
+ auto layerName = GetLayerName(descriptor.m_Operation, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -79,7 +80,7 @@ TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Fill.hpp b/delegate/classic/src/Fill.hpp
index 15dc91e481..e0ba2f9b75 100644
--- a/delegate/classic/src/Fill.hpp
+++ b/delegate/classic/src/Fill.hpp
@@ -92,7 +92,8 @@ TfLiteStatus VisitFillOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddFillLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Fill, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddFillLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -102,7 +103,8 @@ TfLiteStatus VisitFillOperator(DelegateData& delegateData,
auto inputsTensorsProcess = ProcessInputs(layer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
diff --git a/delegate/classic/src/FullyConnected.hpp b/delegate/classic/src/FullyConnected.hpp
index 9ce06a8d45..2d4e987942 100644
--- a/delegate/classic/src/FullyConnected.hpp
+++ b/delegate/classic/src/FullyConnected.hpp
@@ -166,7 +166,8 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddFullyConnectedLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::FullyConnected, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddFullyConnectedLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -176,7 +177,9 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
auto weightsTensor = CreateConstTensor(&tfLiteWeightsTensor,
weightsTensorInfo);
- armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(weightsTensor);
+ auto weightsName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Weights");
+ armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(weightsTensor,
+ weightsName.c_str());
weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsTensorInfo);
@@ -190,7 +193,9 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
auto biasTensor = CreateConstTensor(&tfLiteBiasTensor,
biasTensorInfo);
- armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+ auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
+ armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
+ biasName.c_str());
ARMNN_ASSERT(biasLayer != nullptr);
biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
@@ -199,13 +204,14 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
}
// The data input can also be constant, so we must check that this is also allocated to an input slot
- if(inputTensorInfo.IsConstant())
+ if (inputTensorInfo.IsConstant())
{
auto input =
CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
inputTensorInfo);
- armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
+ auto constantName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
+ armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input, constantName.c_str());
inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
}
@@ -219,7 +225,9 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
// Add reshape to flatten to 2D [batch_size, input_size]
armnn::ReshapeDescriptor reshapeDescriptor;
reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
- reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
+
+ auto reshapeName = GetLayerName(armnn::LayerType::Reshape, nodeIndex, "Input");
+ reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor, reshapeName.c_str());
ARMNN_ASSERT(reshapeLayer != nullptr);
reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
@@ -251,7 +259,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
if (outputTensorInfo.GetNumDimensions() > 2)
{
layer = AddReshapeLayer(tfLiteContext, tfLiteNode, layer, reshapedOutputTensorInfo, outputTensorInfo,
- delegateData);
+ delegateData, nodeIndex);
if (!layer)
{
TF_LITE_MAYBE_KERNEL_LOG(
@@ -270,7 +278,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
}
// Check and Create Activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}
} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/classic/src/Gather.hpp b/delegate/classic/src/Gather.hpp
index f9611a40cf..30dbd0dc0e 100644
--- a/delegate/classic/src/Gather.hpp
+++ b/delegate/classic/src/Gather.hpp
@@ -88,7 +88,8 @@ TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Gather, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -96,7 +97,8 @@ TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
auto inputsTensorsProcess = ProcessInputs(layer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
diff --git a/delegate/classic/src/GatherNd.hpp b/delegate/classic/src/GatherNd.hpp
index e1ee2ac8c0..a49b768873 100644
--- a/delegate/classic/src/GatherNd.hpp
+++ b/delegate/classic/src/GatherNd.hpp
@@ -64,7 +64,8 @@ TfLiteStatus VisitGatherNdOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherNdLayer();
+ auto layerName = GetLayerName(armnn::LayerType::GatherNd, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherNdLayer(layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -72,7 +73,8 @@ TfLiteStatus VisitGatherNdOperator(DelegateData& delegateData,
auto inputsTensorsProcess = ProcessInputs(layer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
diff --git a/delegate/classic/src/LogicalBinary.hpp b/delegate/classic/src/LogicalBinary.hpp
index d71618ee9c..b80b837e23 100644
--- a/delegate/classic/src/LogicalBinary.hpp
+++ b/delegate/classic/src/LogicalBinary.hpp
@@ -80,7 +80,9 @@ TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* logicalBinaryLayer = delegateData.m_Network->AddLogicalBinaryLayer(desc);
+ auto layerName = GetLayerName(desc.m_Operation, nodeIndex);
+ armnn::IConnectableLayer* logicalBinaryLayer = delegateData.m_Network->AddLogicalBinaryLayer(desc,
+ layerName.c_str());
logicalBinaryLayer->SetBackendId(setBackend);
ARMNN_ASSERT(logicalBinaryLayer != nullptr);
@@ -90,7 +92,8 @@ TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
auto inputsTensorsProcess = ProcessInputs(logicalBinaryLayer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
diff --git a/delegate/classic/src/Lstm.hpp b/delegate/classic/src/Lstm.hpp
index 518559fc21..2abc47f0d3 100644
--- a/delegate/classic/src/Lstm.hpp
+++ b/delegate/classic/src/Lstm.hpp
@@ -242,7 +242,8 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddLstmLayer(desc, params);
+ auto layerName = GetLayerName(armnn::LayerType::Lstm, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddLstmLayer(desc, params, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
diff --git a/delegate/classic/src/Normalization.hpp b/delegate/classic/src/Normalization.hpp
index ef2e524369..befadddd77 100644
--- a/delegate/classic/src/Normalization.hpp
+++ b/delegate/classic/src/Normalization.hpp
@@ -63,7 +63,8 @@ TfLiteStatus VisitL2NormalizationOperator(DelegateData& delegateData,
}
// Add a L2Normalization layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddL2NormalizationLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::L2Normalization, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddL2NormalizationLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -71,7 +72,7 @@ TfLiteStatus VisitL2NormalizationOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -142,7 +143,8 @@ TfLiteStatus VisitLocalResponseNormalizationOperator(DelegateData& delegateData,
}
// Add a Normalization layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddNormalizationLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Normalization, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddNormalizationLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -150,7 +152,7 @@ TfLiteStatus VisitLocalResponseNormalizationOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Pack.hpp b/delegate/classic/src/Pack.hpp
index 99c8b804ff..029fd4288c 100644
--- a/delegate/classic/src/Pack.hpp
+++ b/delegate/classic/src/Pack.hpp
@@ -98,7 +98,8 @@ TfLiteStatus VisitPackOperator(DelegateData& delegateData,
}
// The TfLite Pack operator is equivalent to the ArmNN Stack operator
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddStackLayer(desc);
+ auto layerName = GetLayerName(armnn::LayerType::Stack, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddStackLayer(desc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -106,7 +107,8 @@ TfLiteStatus VisitPackOperator(DelegateData& delegateData,
auto inputsTensorsProcess = ProcessInputs(layer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
diff --git a/delegate/classic/src/Pad.hpp b/delegate/classic/src/Pad.hpp
index 440a3d023c..f8e8014d18 100644
--- a/delegate/classic/src/Pad.hpp
+++ b/delegate/classic/src/Pad.hpp
@@ -166,7 +166,8 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Pad, nodeIndex);
+ armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor, layerName.c_str());
padLayer->SetBackendId(setBackend);
ARMNN_ASSERT(padLayer != nullptr);
diff --git a/delegate/classic/src/Pooling.hpp b/delegate/classic/src/Pooling.hpp
index 50e944effc..f61a1a27d2 100644
--- a/delegate/classic/src/Pooling.hpp
+++ b/delegate/classic/src/Pooling.hpp
@@ -117,7 +117,8 @@ TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Pooling2d, nodeIndex);
+ armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor, layerName.c_str());
poolingLayer->SetBackendId(setBackend);
ARMNN_ASSERT(poolingLayer != nullptr);
@@ -125,18 +126,18 @@ TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(poolingLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
- if(Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk)
+ if (Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk)
{
return kTfLiteError;
}
// Check and create activation
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData, nodeIndex);
}
TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
@@ -302,6 +303,7 @@ TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
}
// Create the Layer
+ auto layerName = GetLayerName(armnn::LayerType::Pooling3d, nodeIndex);
- armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
+ armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor, layerName.c_str());
poolingLayer->SetBackendId(setBackend);
ARMNN_ASSERT(poolingLayer != nullptr);
@@ -311,17 +313,17 @@ TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(poolingLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
- if(Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk)
+ if (Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk)
{
return kTfLiteError;
}
- return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
+ return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData, nodeIndex);
}
} // namespace armnnDelegate
diff --git a/delegate/classic/src/Prelu.hpp b/delegate/classic/src/Prelu.hpp
index 4fdad4a6b7..193e3f0376 100644
--- a/delegate/classic/src/Prelu.hpp
+++ b/delegate/classic/src/Prelu.hpp
@@ -81,7 +81,8 @@ TfLiteStatus VisitPreluOperator(DelegateData& delegateData,
outputTensorInfo);
}
- armnn::IConnectableLayer* preluLayer = delegateData.m_Network->AddPreluLayer();
+ auto layerName = GetLayerName(armnn::LayerType::Prelu, nodeIndex);
+ armnn::IConnectableLayer* preluLayer = delegateData.m_Network->AddPreluLayer(layerName.c_str());
ARMNN_ASSERT(preluLayer != nullptr);
bool isConstantAlpha = tflite::IsConstantTensor(&tfLiteAlphaTensor);
@@ -91,7 +92,9 @@ TfLiteStatus VisitPreluOperator(DelegateData& delegateData,
{
auto constAlphaTensor = armnn::ConstTensor(alphaTensorInfo, tfLiteAlphaTensor.data.data);
- armnn::IConnectableLayer* constLayer = delegateData.m_Network->AddConstantLayer(constAlphaTensor);
+ auto alphaName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Alpha");
+ armnn::IConnectableLayer* constLayer = delegateData.m_Network->AddConstantLayer(constAlphaTensor,
+ alphaName.c_str());
ARMNN_ASSERT(constLayer != nullptr);
constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
diff --git a/delegate/classic/src/Quantization.hpp b/delegate/classic/src/Quantization.hpp
index f1192960e4..8291854383 100644
--- a/delegate/classic/src/Quantization.hpp
+++ b/delegate/classic/src/Quantization.hpp
@@ -70,7 +70,8 @@ TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer();
+ auto layerName = GetLayerName(armnn::LayerType::Dequantize, nodeIndex);
+ armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer(layerName.c_str());
dequantizeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(dequantizeLayer != nullptr);
@@ -80,7 +81,8 @@ TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
auto inputsTensorsProcess = ProcessInputs(dequantizeLayer,
delegateData,
tfLiteContext,
- tfLiteNode);
+ tfLiteNode,
+ nodeIndex);
if (inputsTensorsProcess == kTfLiteError)
{
return inputsTensorsProcess;
@@ -152,7 +154,8 @@ TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer();
+ auto layerName = GetLayerName(armnn::LayerType::Quantize, nodeIndex);
+ armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer(layerName.c_str());
quantizeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(quantizeLayer != nullptr);
@@ -160,7 +163,7 @@ TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(quantizeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(quantizeLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Redefine.hpp b/delegate/classic/src/Redefine.hpp
index 2c29083719..6b10e448e7 100644
--- a/delegate/classic/src/Redefine.hpp
+++ b/delegate/classic/src/Redefine.hpp
@@ -64,7 +64,8 @@ TfLiteStatus VisitCastOperator(DelegateData& delegateData,
}
// Add a Cast layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer();
+ auto layerName = GetLayerName(armnn::LayerType::Cast, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer(layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -72,7 +73,7 @@ TfLiteStatus VisitCastOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -206,7 +207,8 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+ auto layerName = GetLayerName(armnn::LayerType::Reshape, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -214,7 +216,7 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -291,7 +293,8 @@ TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+ auto layerName = GetLayerName(armnn::LayerType::Reshape, nodeIndex, "Squeeze");
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -299,7 +302,7 @@ TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -396,7 +399,8 @@ TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+ auto layerName = GetLayerName(armnn::LayerType::Reshape, nodeIndex, "ExpandDims");
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -405,7 +409,7 @@ TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Reduce.hpp b/delegate/classic/src/Reduce.hpp
index 2d8b462cd2..8731ef5235 100644
--- a/delegate/classic/src/Reduce.hpp
+++ b/delegate/classic/src/Reduce.hpp
@@ -125,8 +125,9 @@ TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- // Add an Reduce layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddReduceLayer(desc);
+ // Add a Reduce layer
+ auto layerName = GetLayerName(armnn::LayerType::Reduce, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddReduceLayer(desc, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -134,7 +135,7 @@ TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Resize.hpp b/delegate/classic/src/Resize.hpp
index 32c7f46b9f..cede32b6bf 100644
--- a/delegate/classic/src/Resize.hpp
+++ b/delegate/classic/src/Resize.hpp
@@ -185,14 +185,14 @@ TfLiteStatus VisitResizeOperator(DelegateData& delegateData,
}
- armnn::IConnectableLayer* resizeLayer = nullptr;
- resizeLayer = delegateData.m_Network->AddResizeLayer(desc, layerName.c_str());
+ auto resizeName = GetLayerName(armnn::LayerType::Resize, nodeIndex);
+ armnn::IConnectableLayer* resizeLayer = delegateData.m_Network->AddResizeLayer(desc, resizeName.c_str());
armnn::IOutputSlot& outputSlot = resizeLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(resizeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(resizeLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/ReverseV2.hpp b/delegate/classic/src/ReverseV2.hpp
index d49d20b5c1..64941f7c5c 100644
--- a/delegate/classic/src/ReverseV2.hpp
+++ b/delegate/classic/src/ReverseV2.hpp
@@ -109,8 +109,6 @@ TfLiteStatus VisitReverseV2Operator(DelegateData& delegateData,
}
}
- std::string layerName("ReverseV2");
-
const auto maxDimension = 4;
const auto axisTensorNumValues = static_cast<unsigned int>(tfLiteAxisTensor.dims->size);
@@ -135,13 +133,14 @@ TfLiteStatus VisitReverseV2Operator(DelegateData& delegateData,
outputTensorInfo);
}
+ auto layerName = GetLayerName(armnn::LayerType::ReverseV2, nodeIndex);
armnn::IConnectableLayer* reverseV2Layer = delegateData.m_Network->AddReverseV2Layer(layerName.c_str());
armnn::IOutputSlot& outputSlot = reverseV2Layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
// Try to connect the Constant Inputs if there are any
- if(ProcessInputs(reverseV2Layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(reverseV2Layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Round.hpp b/delegate/classic/src/Round.hpp
index 7a060b1d8f..d549a458cc 100644
--- a/delegate/classic/src/Round.hpp
+++ b/delegate/classic/src/Round.hpp
@@ -52,14 +52,15 @@ TfLiteStatus VisitFloorOperator(DelegateData& delegateData,
}
// Add a Floor layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddFloorLayer();
+ auto layerName = GetLayerName(armnn::LayerType::Floor, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddFloorLayer(layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Shape.hpp b/delegate/classic/src/Shape.hpp
index e5dae23238..10800b843b 100644
--- a/delegate/classic/src/Shape.hpp
+++ b/delegate/classic/src/Shape.hpp
@@ -42,7 +42,7 @@ TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
auto* shapeParameters = reinterpret_cast<TfLiteShapeParams*>(tfLiteNode->builtin_data);
- if ( shapeParameters->out_type != kTfLiteInt32 && shapeParameters->out_type != kTfLiteInt64 )
+ if (shapeParameters->out_type != kTfLiteInt32 && shapeParameters->out_type != kTfLiteInt64)
{
TF_LITE_MAYBE_KERNEL_LOG(
tfLiteContext,
@@ -75,7 +75,8 @@ TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
}
// Add a Shape layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddShapeLayer();
+ auto layerName = GetLayerName(armnn::LayerType::Shape, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddShapeLayer(layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -83,7 +84,7 @@ TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Slice.hpp b/delegate/classic/src/Slice.hpp
index a586e024d1..9a63e43afc 100644
--- a/delegate/classic/src/Slice.hpp
+++ b/delegate/classic/src/Slice.hpp
@@ -149,9 +149,9 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
validateFunc(outputTensorInfo, isSupported);
return isSupported ? kTfLiteOk : kTfLiteError;
}
- auto layerName = fmt::format("Slice:{}", nodeIndex);
// Add a Slice layer
+ auto layerName = GetLayerName(armnn::LayerType::Slice, nodeIndex);
armnn::IConnectableLayer* layer = delegateData.m_Network->AddSliceLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -160,7 +160,7 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
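
Worth noting in the Slice hunk: this file previously built its own name with fmt::format("Slice:{}", nodeIndex), so if GetLayerName keeps that "<type>:<nodeIndex>" convention (an assumption based on the removed line), the emitted name is unchanged:

// Assumed equivalence, not verified against the utility's definition:
auto oldName = fmt::format("Slice:{}", nodeIndex);                // "Slice:42" for nodeIndex == 42
auto newName = GetLayerName(armnn::LayerType::Slice, nodeIndex);  // "Slice:42" as well
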
diff --git a/delegate/classic/src/Softmax.hpp b/delegate/classic/src/Softmax.hpp
index bfc6874faa..dd36a3d05f 100644
--- a/delegate/classic/src/Softmax.hpp
+++ b/delegate/classic/src/Softmax.hpp
@@ -116,6 +116,7 @@ TfLiteStatus VisitSoftmaxOperator(DelegateData& delegateData,
}
}
+ auto layerName = GetLayerName(armnn::LayerType::Softmax, nodeIndex);
armnn::IConnectableLayer* softmaxLayer = nullptr;
switch(softmaxOperatorCode)
@@ -125,13 +126,13 @@ TfLiteStatus VisitSoftmaxOperator(DelegateData& delegateData,
armnn::SoftmaxDescriptor descriptor;
auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(tfLiteNode->builtin_data);
descriptor.m_Beta = params->beta;
- softmaxLayer = delegateData.m_Network->AddSoftmaxLayer(descriptor);
+ softmaxLayer = delegateData.m_Network->AddSoftmaxLayer(descriptor, layerName.c_str());
break;
}
case kTfLiteBuiltinLogSoftmax:
{
armnn::LogSoftmaxDescriptor descriptor;
- softmaxLayer = delegateData.m_Network->AddLogSoftmaxLayer(descriptor);
+ softmaxLayer = delegateData.m_Network->AddLogSoftmaxLayer(descriptor, layerName.c_str());
break;
}
default:
@@ -143,7 +144,7 @@ TfLiteStatus VisitSoftmaxOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(softmaxLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(softmaxLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
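
One detail of the Softmax hunk: the name is computed once, before the switch, and reused by both branches. Because it is derived from armnn::LayerType::Softmax, under the assumed "<type>:<nodeIndex>" scheme a kTfLiteBuiltinLogSoftmax node would also carry the "Softmax" prefix:

// Illustration only, assuming the naming scheme sketched earlier:
// kTfLiteBuiltinSoftmax    at node 3 -> layer named "Softmax:3"
// kTfLiteBuiltinLogSoftmax at node 3 -> layer named "Softmax:3"
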
diff --git a/delegate/classic/src/SpaceDepth.hpp b/delegate/classic/src/SpaceDepth.hpp
index cc7f03413d..b65207bb95 100644
--- a/delegate/classic/src/SpaceDepth.hpp
+++ b/delegate/classic/src/SpaceDepth.hpp
@@ -64,12 +64,13 @@ TfLiteStatus VisitSpaceToDepthOperator(DelegateData& delegateData,
}
// Add a SpaceToDepth layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToDepthLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::SpaceToDepth, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToDepthLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
@@ -132,7 +133,8 @@ TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
}
// Add a DepthToSpace layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthToSpaceLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::DepthToSpace, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthToSpaceLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -140,7 +142,7 @@ TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Split.hpp b/delegate/classic/src/Split.hpp
index 877e0b5729..fcd901b23e 100644
--- a/delegate/classic/src/Split.hpp
+++ b/delegate/classic/src/Split.hpp
@@ -130,7 +130,8 @@ TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Splitter, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -335,7 +336,7 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
}
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/StridedSlice.hpp b/delegate/classic/src/StridedSlice.hpp
index 998e3d3e14..43f96411b6 100644
--- a/delegate/classic/src/StridedSlice.hpp
+++ b/delegate/classic/src/StridedSlice.hpp
@@ -135,7 +135,8 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
}
// Add a StridedSlice layer
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddStridedSliceLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::StridedSlice, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddStridedSliceLayer(descriptor, layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
@@ -143,7 +144,7 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(layer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Tile.hpp b/delegate/classic/src/Tile.hpp
index 974c771a7e..ea4ebbf5bb 100644
--- a/delegate/classic/src/Tile.hpp
+++ b/delegate/classic/src/Tile.hpp
@@ -148,7 +148,7 @@ TfLiteStatus VisitTileOperator(DelegateData& delegateData,
tileDescriptor);
}
- std::string layerName("Tile");
+ auto layerName = GetLayerName(armnn::LayerType::Tile, nodeIndex);
armnn::IConnectableLayer* layer = delegateData.m_Network->AddTileLayer(tileDescriptor, layerName.c_str());
if (layer == nullptr)
@@ -158,7 +158,7 @@ TfLiteStatus VisitTileOperator(DelegateData& delegateData,
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
- if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/Transpose.hpp b/delegate/classic/src/Transpose.hpp
index 41178d0b59..247ddf7e9e 100644
--- a/delegate/classic/src/Transpose.hpp
+++ b/delegate/classic/src/Transpose.hpp
@@ -91,7 +91,8 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor);
+ auto layerName = GetLayerName(armnn::LayerType::Transpose, nodeIndex);
+ armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor, layerName.c_str());
transposeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(transposeLayer != nullptr);
ARMNN_ASSERT(transposeLayer->GetNumInputSlots() == 1); // permutation vector given to descriptor object
@@ -100,7 +101,7 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
outputSlot.SetTensorInfo(outputTensorInfo);
// try to connect the Constant Inputs if there are any
- if(ProcessInputs(transposeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
+ if (ProcessInputs(transposeLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
{
return kTfLiteError;
}
diff --git a/delegate/classic/src/UnidirectionalSequenceLstm.hpp b/delegate/classic/src/UnidirectionalSequenceLstm.hpp
index 0e1ad1c754..5fa6bb0260 100644
--- a/delegate/classic/src/UnidirectionalSequenceLstm.hpp
+++ b/delegate/classic/src/UnidirectionalSequenceLstm.hpp
@@ -278,7 +278,10 @@ TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- armnn::IConnectableLayer* layer = delegateData.m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
+ auto layerName = GetLayerName(armnn::LayerType::UnidirectionalSequenceLstm, nodeIndex);
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddUnidirectionalSequenceLstmLayer(desc,
+ params,
+ layerName.c_str());
layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
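
The three-line call above threads the name through AddUnidirectionalSequenceLstmLayer alongside the descriptor and the weights. This works because INetwork's layer factories take an optional trailing name, which is why the pre-patch call sites could omit it; a sketch of that interface shape (the default argument is inferred from the unnamed pre-patch calls):

// Sketch of the factory signature pattern the patch relies on:
IConnectableLayer* AddUnidirectionalSequenceLstmLayer(const UnidirectionalSequenceLstmDescriptor& descriptor,
                                                      const LstmInputParams& params,
                                                      const char* name = nullptr);
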
diff --git a/delegate/classic/src/Unpack.hpp b/delegate/classic/src/Unpack.hpp
index 685293bcb6..2cd32564c1 100644
--- a/delegate/classic/src/Unpack.hpp
+++ b/delegate/classic/src/Unpack.hpp
@@ -171,10 +171,8 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
};
- std::string splitterLayerName("Unpack Splitter");
-
- armnn::IConnectableLayer* splitterLayer = delegateData.m_Network->AddSplitterLayer(splitDesc,
- splitterLayerName.c_str());
+ auto layerName = GetLayerName(armnn::LayerType::Splitter, nodeIndex, "Unpack");
+ armnn::IConnectableLayer* splitterLayer = delegateData.m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
splitterLayer->SetBackendId(setBackendSplit);
ARMNN_ASSERT(splitterLayer != nullptr);
@@ -189,9 +187,9 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
// Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
for (unsigned int outputIndex = 0; outputIndex < splitterLayer->GetNumOutputSlots(); ++outputIndex)
{
- std::string reshapeLayerName("Unpack Reshape");
+ auto reshapeName = GetLayerName(armnn::LayerType::Reshape, nodeIndex, "Unpack");
armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor,
- reshapeLayerName.c_str());
+ reshapeName.c_str());
reshapeLayer->SetBackendId(setBackendReshape);
ARMNN_ASSERT(reshapeLayer != nullptr);
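
Unpack is the one spot in this section that uses a three-argument form of GetLayerName, tagging the internal Splitter and Reshape layers that together implement the tflite UNPACK op. A sketch of that overload, consistent with these call sites (the parameter name and the exact format string are assumptions):

// Sketch only: the extra tag distinguishes layers that are sub-steps of a
// composite op, e.g. "Splitter:Unpack:7" for the Splitter behind UNPACK node 7.
inline std::string GetLayerName(armnn::LayerType layerType, int nodeIndex, const std::string& tag)
{
    return fmt::format("{}:{}:{}", armnn::GetLayerTypeAsCString(layerType), tag, nodeIndex);
}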