Diffstat (limited to 'delegate/src')
-rw-r--r--   delegate/src/Activation.hpp                      1
-rw-r--r--   delegate/src/ArgMinMax.hpp                       3
-rw-r--r--   delegate/src/BatchMatMul.hpp                     5
-rw-r--r--   delegate/src/BatchSpace.hpp                      6
-rw-r--r--   delegate/src/Comparison.hpp                      4
-rw-r--r--   delegate/src/Control.hpp                         6
-rw-r--r--   delegate/src/Convolution.hpp                    12
-rw-r--r--   delegate/src/DelegateUtils.hpp                  15
-rw-r--r--   delegate/src/ElementwiseBinary.hpp               6
-rw-r--r--   delegate/src/ElementwiseUnary.hpp                4
-rw-r--r--   delegate/src/Fill.hpp                            3
-rw-r--r--   delegate/src/FullyConnected.hpp                  3
-rw-r--r--   delegate/src/Gather.hpp                          5
-rw-r--r--   delegate/src/GatherNd.hpp                        3
-rw-r--r--   delegate/src/LogicalBinary.hpp                   3
-rw-r--r--   delegate/src/Lstm.hpp                            3
-rw-r--r--   delegate/src/MultiLayerFacade.hpp                4
-rw-r--r--   delegate/src/Normalization.hpp                   6
-rw-r--r--   delegate/src/Pack.hpp                            5
-rw-r--r--   delegate/src/Pad.hpp                             3
-rw-r--r--   delegate/src/Pooling.hpp                         6
-rw-r--r--   delegate/src/Prelu.hpp                           1
-rw-r--r--   delegate/src/Quantization.hpp                    6
-rw-r--r--   delegate/src/Redefine.hpp                        6
-rw-r--r--   delegate/src/Reduce.hpp                          3
-rw-r--r--   delegate/src/Resize.hpp                          1
-rw-r--r--   delegate/src/Shape.hpp                           3
-rw-r--r--   delegate/src/SharedFunctions.cpp                 3
-rw-r--r--   delegate/src/Slice.hpp                           6
-rw-r--r--   delegate/src/Softmax.hpp                         2
-rw-r--r--   delegate/src/SpaceDepth.hpp                      6
-rw-r--r--   delegate/src/Split.hpp                           8
-rw-r--r--   delegate/src/StridedSlice.hpp                    4
-rw-r--r--   delegate/src/Transpose.hpp                       4
-rw-r--r--   delegate/src/UnidirectionalSequenceLstm.hpp      3
-rw-r--r--   delegate/src/Unpack.hpp                          6
36 files changed, 157 insertions, 11 deletions
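
Note on the common pattern across this patch: FORWARD_LAYER_SUPPORT_FUNC gains a setBackend out-parameter, each visitor declares an armnn::BackendId before the support check, and the layer created afterwards is pinned to the chosen backend via IConnectableLayer::SetBackendId. The self-contained sketch below shows that flow in miniature; BackendId and Layer are stand-in types for illustration, not the real ArmNN classes.

#include <iostream>
#include <string>
#include <vector>

// Stand-ins for armnn::BackendId and armnn::IConnectableLayer, so the sketch
// compiles without ArmNN headers.
using BackendId = std::string;

struct Layer
{
    BackendId m_Backend;
    void SetBackendId(const BackendId& id) { m_Backend = id; }
};

// Mirrors the shape of FORWARD_LAYER_SUPPORT_FUNC after this change: try each
// backend in preference order and record the first one that reports support.
bool CheckSupport(const std::vector<BackendId>& backends,
                  BackendId& setBackend,
                  bool (*isSupported)(const BackendId&))
{
    for (const auto& backendId : backends)
    {
        if (isSupported(backendId))
        {
            setBackend = backendId;   // remember which backend accepted the layer
            return true;
        }
    }
    return false;
}

int main()
{
    const std::vector<BackendId> backends = { "GpuAcc", "CpuAcc", "CpuRef" };

    BackendId setBackend;             // declared before the support check, as in the visitors
    const bool supported = CheckSupport(backends, setBackend,
                                        [](const BackendId& b) { return b == "CpuAcc"; });

    if (supported)
    {
        Layer layer;
        layer.SetBackendId(setBackend);  // pin the created layer to the backend that validated it
        std::cout << "layer assigned to " << layer.m_Backend << "\n";
    }
    return 0;
}
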
diff --git a/delegate/src/Activation.hpp b/delegate/src/Activation.hpp
index 0071873d16..3560bfdae7 100644
--- a/delegate/src/Activation.hpp
+++ b/delegate/src/Activation.hpp
@@ -29,6 +29,7 @@ TfLiteStatus ValidateActivationOperator(DelegateData& delegateData,
IsActivationSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo,
outputInfo,
activationDesc);
diff --git a/delegate/src/ArgMinMax.hpp b/delegate/src/ArgMinMax.hpp
index 057dc8ba0a..dd28807f67 100644
--- a/delegate/src/ArgMinMax.hpp
+++ b/delegate/src/ArgMinMax.hpp
@@ -91,6 +91,7 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("ARGMINMAX",
@@ -98,6 +99,7 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
IsArgMinMaxSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
desc);
@@ -111,6 +113,7 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
// Add an ArgMinMax layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddArgMinMaxLayer(desc);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/BatchMatMul.hpp b/delegate/src/BatchMatMul.hpp
index 391301e4d7..3b884a092f 100644
--- a/delegate/src/BatchMatMul.hpp
+++ b/delegate/src/BatchMatMul.hpp
@@ -68,6 +68,7 @@ namespace armnnDelegate
// Check if supported
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("BATCH_MATMUL",
@@ -75,6 +76,7 @@ namespace armnnDelegate
IsBatchMatMulSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
armnnLHSInputTensorInfo,
armnnRHSInputTensorInfo,
outputTensorInfo,
@@ -88,6 +90,7 @@ namespace armnnDelegate
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchMatMulLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -96,4 +99,4 @@ namespace armnnDelegate
return kTfLiteOk;
}
-} // namespace armnnDelegate \ No newline at end of file
+} // namespace armnnDelegate
diff --git a/delegate/src/BatchSpace.hpp b/delegate/src/BatchSpace.hpp
index 847d6f15d2..903fe37eae 100644
--- a/delegate/src/BatchSpace.hpp
+++ b/delegate/src/BatchSpace.hpp
@@ -72,6 +72,7 @@ TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
// Check if supported
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("BATCH_TO_SPACE_ND",
@@ -79,6 +80,7 @@ TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
IsBatchToSpaceNdSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor);
@@ -95,6 +97,7 @@ TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
// Add a BatchToSpace layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -163,6 +166,7 @@ TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
// Check if supported
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("SPACE_TO_BATCH_ND",
@@ -170,6 +174,7 @@ TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
IsSpaceToBatchNdSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor);
@@ -186,6 +191,7 @@ TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
// Add a SpaceToBatch layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToBatchNdLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Comparison.hpp b/delegate/src/Comparison.hpp
index 8bf53c71ef..ee121e3c5c 100644
--- a/delegate/src/Comparison.hpp
+++ b/delegate/src/Comparison.hpp
@@ -88,7 +88,7 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
armnn::ComparisonDescriptor descriptor(comparisonOperation);
bool isSupported = false;
-
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("COMPARISON",
@@ -96,6 +96,7 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
IsComparisonSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo0,
inputTensorInfo1,
outputTensorInfo,
@@ -109,6 +110,7 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* comparisonLayer = delegateData.m_Network->AddComparisonLayer(descriptor);
+ comparisonLayer->SetBackendId(setBackend);
ARMNN_ASSERT(comparisonLayer != nullptr);
armnn::IOutputSlot& outputSlot = comparisonLayer->GetOutputSlot(0);
diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp
index f04245bcb6..02426a5616 100644
--- a/delegate/src/Control.hpp
+++ b/delegate/src/Control.hpp
@@ -119,6 +119,7 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
// Check if supported
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("CONCATENATION",
@@ -126,6 +127,7 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
IsConcatSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputConstTensorInfos,
outputTensorInfo,
concatDescriptor);
@@ -139,6 +141,7 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
// Setup layer and connect.
armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor);
+ concatenationLayer->SetBackendId(setBackend);
ARMNN_ASSERT(concatenationLayer != nullptr);
// Connect the Constant Inputs
@@ -258,6 +261,7 @@ TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
// Check if supported
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("MEAN",
@@ -265,6 +269,7 @@ TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
IsMeanSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
desc);
@@ -278,6 +283,7 @@ TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
// Setup layer and connect.
armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc);
+ meanLayer->SetBackendId(setBackend);
ARMNN_ASSERT(meanLayer != nullptr);
armnn::IOutputSlot& outputSlot = meanLayer->GetOutputSlot(0);
diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp
index 93da4c8ce2..e307bb9be3 100644
--- a/delegate/src/Convolution.hpp
+++ b/delegate/src/Convolution.hpp
@@ -144,6 +144,7 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
bool isSupported = false;
@@ -152,6 +153,7 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
IsConvolution2dSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor,
@@ -162,6 +164,7 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
// Set up filter and biases
armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
+ layer->SetBackendId(setBackend);
if(tflite::IsConstantTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]]))
{
@@ -300,6 +303,7 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
// If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
// support for the operator
// If supported, VisitConvolutionOperator will be called again to add the layer to the network as seen below.
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
bool isSupported = false;
@@ -308,6 +312,7 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
IsConvolution3dSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor,
@@ -317,6 +322,7 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
// Add a constant layer for weights and biases if inputs are constant,
@@ -497,6 +503,7 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
}
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
bool isSupported = false;
@@ -505,6 +512,7 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
IsDepthwiseConvolutionSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor,
@@ -514,6 +522,7 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
+ layer->SetBackendId(setBackend);
if(tflite::IsConstantTensor(&tfLiteFilterTensor))
{
@@ -699,6 +708,7 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
filterTensorInfo,
armnn::Optional<armnn::PermutationVector&>());
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
bool isSupported = false;
@@ -707,6 +717,7 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
IsTransposeConvolution2dSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor,
@@ -718,6 +729,7 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
filterTensor,
armnn::EmptyOptional());
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 58d8048be3..850b279fea 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -25,7 +25,7 @@ namespace
{
// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
-#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, ...) \
+#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
try \
{ \
for (auto&& backendId : backends) \
@@ -38,6 +38,7 @@ try \
layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
if (supported) \
{ \
+ setBackend = backendId; \
break; \
} \
else \
@@ -224,11 +225,13 @@ armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
armnn::ReshapeDescriptor reshapeDescriptor;
reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
tfLiteContext,
IsReshapeSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
smallInfo,
reshapedInfo,
reshapeDescriptor);
@@ -240,6 +243,7 @@ armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
ARMNN_ASSERT(delegateData.m_Network != nullptr);
// Add Reshape layer
armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
+ reshapeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(reshapeLayer != nullptr);
reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
@@ -331,11 +335,13 @@ TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
tfLiteContext,
IsActivationSupported,
data.m_Backends,
isSupported,
+ setBackend,
prevLayer->GetOutputSlot(0).GetTensorInfo(),
activationOutputInfo,
activationDesc);
@@ -344,6 +350,7 @@ TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
return kTfLiteError;
}
armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
+ activationLayer->SetBackendId(setBackend);
ARMNN_ASSERT(activationLayer != nullptr);
activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);
@@ -566,11 +573,13 @@ TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
{
IgnoreUnused(layer);
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
tfLiteContext,
IsConstantSupported,
data.m_Backends,
isSupported,
+ setBackend,
constTensorInfo);
if (!isSupported)
{
@@ -581,6 +590,7 @@ TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
constTensorInfo,
armnn::Optional<armnn::PermutationVector&>());
armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
+ constantLayer->SetBackendId(setBackend);
armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(constTensorInfo);
@@ -615,11 +625,13 @@ TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
{
armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
tfLiteContext,
IsConstantSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo);
if (!isSupported)
{
@@ -629,6 +641,7 @@ TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
inputTensorInfo,
armnn::Optional<armnn::PermutationVector&>());
armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
+ constantLayer->SetBackendId(setBackend);
armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(inputTensorInfo);
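
Two details of the DelegateUtils.hpp macro change above are easy to miss. First, because setBackend is a macro parameter, the new assignment setBackend = backendId is applied textually to whatever expression the caller passes. Second, the validation-only helpers in this patch (ValidateActivationOperator, the ValidateAdd/Div/Maximum/Minimum/Mul/Sub helpers, ValidatePreluOperator, ValidateResizeOperator, the softmax validators and ValidateFloorOperator) do not create a layer, so they pass a default-constructed armnn::BackendId() temporary and simply discard the selected backend. A reduced sketch of that behaviour, with a simplified macro and a string stand-in for armnn::BackendId (the real macro also resolves the per-backend layer-support object and logs reasonIfUnsupported):

#include <string>
#include <vector>

using BackendId = std::string;   // stand-in for armnn::BackendId

// Reduced version of FORWARD_LAYER_SUPPORT_FUNC, kept only to show how the
// setBackend parameter behaves.
#define FORWARD_LAYER_SUPPORT_SKETCH(backends, supported, setBackend)             \
    for (auto&& backendId : (backends))                                           \
    {                                                                              \
        supported = (backendId == "CpuRef"); /* stand-in for the support query */  \
        if (supported)                                                             \
        {                                                                          \
            setBackend = backendId; /* textually assigns to the passed expression */ \
            break;                                                                 \
        }                                                                          \
    }

int main()
{
    const std::vector<BackendId> backends = { "GpuAcc", "CpuRef" };
    bool isSupported = false;

    // Visitor that will add a layer: capture the backend in a local so the
    // layer can later be pinned with SetBackendId(setBackend).
    BackendId setBackend;
    FORWARD_LAYER_SUPPORT_SKETCH(backends, isSupported, setBackend);

    // Validation-only helper: pass a temporary; the assignment lands on the
    // temporary and the selected backend is intentionally thrown away.
    FORWARD_LAYER_SUPPORT_SKETCH(backends, isSupported, BackendId());

    return isSupported ? 0 : 1;
}
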
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index 6e81db4b4f..caf02624be 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -32,6 +32,7 @@ TfLiteStatus ValidateAddOperator(DelegateData& delegateData,
IsAdditionSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo1,
inputInfo2,
outputTensorInfo);
@@ -56,6 +57,7 @@ TfLiteStatus ValidateDivOperator(DelegateData& delegateData,
IsDivisionSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo1,
inputInfo2,
outputTensorInfo);
@@ -108,6 +110,7 @@ TfLiteStatus ValidateMaximumOperator(DelegateData& delegateData,
IsMaximumSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo1,
inputInfo2,
outputTensorInfo);
@@ -131,6 +134,7 @@ TfLiteStatus ValidateMinimumOperator(DelegateData& delegateData,
IsMinimumSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo1,
inputInfo2,
outputTensorInfo);
@@ -154,6 +158,7 @@ TfLiteStatus ValidateMulOperator(DelegateData& delegateData,
IsMultiplicationSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo1,
inputInfo2,
outputTensorInfo);
@@ -177,6 +182,7 @@ TfLiteStatus ValidateSubOperator(DelegateData& delegateData,
IsSubtractionSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo1,
inputInfo2,
outputTensorInfo);
diff --git a/delegate/src/ElementwiseUnary.hpp b/delegate/src/ElementwiseUnary.hpp
index 79d7f82249..947e531162 100644
--- a/delegate/src/ElementwiseUnary.hpp
+++ b/delegate/src/ElementwiseUnary.hpp
@@ -51,7 +51,7 @@ TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
armnn::ElementwiseUnaryDescriptor descriptor(unaryOperation);
bool isSupported = false;
-
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("ELEMENTWISE_UNARY",
@@ -59,6 +59,7 @@ TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
IsElementwiseUnarySupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor);
@@ -71,6 +72,7 @@ TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Fill.hpp b/delegate/src/Fill.hpp
index dc30e53ba9..e79133e15c 100644
--- a/delegate/src/Fill.hpp
+++ b/delegate/src/Fill.hpp
@@ -72,6 +72,7 @@ TfLiteStatus VisitFillOperator(DelegateData& delegateData,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("FILL",
@@ -79,6 +80,7 @@ TfLiteStatus VisitFillOperator(DelegateData& delegateData,
IsFillSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
descriptor);
@@ -91,6 +93,7 @@ TfLiteStatus VisitFillOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddFillLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp
index 6677ab900e..a2960e299b 100644
--- a/delegate/src/FullyConnected.hpp
+++ b/delegate/src/FullyConnected.hpp
@@ -110,6 +110,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
descriptor.m_ConstantWeights = isConstantWeights;
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("FULLY_CONNECTED",
@@ -117,6 +118,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
IsFullyConnectedSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
reshapedTensorInfo,
outputTensorInfo,
weightsTensorInfo,
@@ -131,6 +133,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddFullyConnectedLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
// Add a constant layer for weights and biases if inputs are constant.
diff --git a/delegate/src/Gather.hpp b/delegate/src/Gather.hpp
index 616de7e09e..9e98966471 100644
--- a/delegate/src/Gather.hpp
+++ b/delegate/src/Gather.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -69,6 +69,7 @@ TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
return kTfLiteError;
}
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
// Check if supported
@@ -78,6 +79,7 @@ TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
IsGatherSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
indicesTensorInfo,
outputTensorInfo,
@@ -86,6 +88,7 @@ TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
diff --git a/delegate/src/GatherNd.hpp b/delegate/src/GatherNd.hpp
index 1e12c5cf68..f2192f77c3 100644
--- a/delegate/src/GatherNd.hpp
+++ b/delegate/src/GatherNd.hpp
@@ -46,6 +46,7 @@ TfLiteStatus VisitGatherNdOperator(DelegateData& delegateData,
const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteIndicesTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
// Check if supported
@@ -55,6 +56,7 @@ TfLiteStatus VisitGatherNdOperator(DelegateData& delegateData,
IsGatherNdSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
indicesTensorInfo,
outputTensorInfo);
@@ -62,6 +64,7 @@ TfLiteStatus VisitGatherNdOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherNdLayer();
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
diff --git a/delegate/src/LogicalBinary.hpp b/delegate/src/LogicalBinary.hpp
index 562b5d3438..b6a8f5d5f6 100644
--- a/delegate/src/LogicalBinary.hpp
+++ b/delegate/src/LogicalBinary.hpp
@@ -52,6 +52,7 @@ TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
// Check if supported
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("LOGICAL_BINARY",
@@ -59,6 +60,7 @@ TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
IsLogicalBinarySupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo0,
inputTensorInfo1,
outputTensorInfo,
@@ -72,6 +74,7 @@ TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* logicalBinaryLayer = delegateData.m_Network->AddLogicalBinaryLayer(desc);
+ logicalBinaryLayer->SetBackendId(setBackend);
ARMNN_ASSERT(logicalBinaryLayer != nullptr);
armnn::IOutputSlot& outputSlot = logicalBinaryLayer->GetOutputSlot(0);
diff --git a/delegate/src/Lstm.hpp b/delegate/src/Lstm.hpp
index 253cd2162d..8c1f877ec9 100644
--- a/delegate/src/Lstm.hpp
+++ b/delegate/src/Lstm.hpp
@@ -216,6 +216,7 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("LSTM",
@@ -223,6 +224,7 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData,
IsLstmSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputStateInInfo,
cellStateInInfo,
@@ -241,6 +243,7 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddLstmLayer(desc, params);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(scratchBufferTensorInfo);
diff --git a/delegate/src/MultiLayerFacade.hpp b/delegate/src/MultiLayerFacade.hpp
index 2fdfc7082a..aa00be8f60 100644
--- a/delegate/src/MultiLayerFacade.hpp
+++ b/delegate/src/MultiLayerFacade.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -117,6 +117,8 @@ public:
virtual const armnn::BaseDescriptor& GetParameters() const override { return m_NullDescriptor; }
+ void SetBackendId(const armnn::BackendId& id) override {}
+
protected:
/// Retrieve the handles to the constant values stored by the layer.
/// @return A vector of the constant tensors stored by this layer.
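
On the empty SetBackendId override added to MultiLayerFacade: the facade implements armnn::IConnectableLayer while appearing to wrap several underlying layers, so it has no single backend of its own to record. Assuming the new method is declared pure virtual on the interface, which is what the stub here suggests, every implementation must provide something, even a no-op. A minimal illustration of that obligation with simplified stand-in types (not the real ArmNN declarations):

#include <string>

// Assumption for this sketch: the interface declares SetBackendId as pure
// virtual, forcing the facade to supply at least a stub.
struct IConnectableLayerLike
{
    virtual void SetBackendId(const std::string& id) = 0;
    virtual ~IConnectableLayerLike() = default;
};

struct MultiLayerFacadeLike : public IConnectableLayerLike
{
    // No single backend to pin, so the call is accepted and ignored.
    void SetBackendId(const std::string& /*id*/) override {}
};

int main()
{
    MultiLayerFacadeLike facade;
    facade.SetBackendId("CpuRef");   // compiles and does nothing, as in the patch
    return 0;
}
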
diff --git a/delegate/src/Normalization.hpp b/delegate/src/Normalization.hpp
index 0933552973..d0db43ea7c 100644
--- a/delegate/src/Normalization.hpp
+++ b/delegate/src/Normalization.hpp
@@ -42,6 +42,7 @@ TfLiteStatus VisitL2NormalizationOperator(DelegateData& delegateData,
descriptor.m_DataLayout = armnn::DataLayout::NHWC;
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("L2_NORMALIZATION",
@@ -49,6 +50,7 @@ TfLiteStatus VisitL2NormalizationOperator(DelegateData& delegateData,
IsL2NormalizationSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
descriptor);
@@ -62,6 +64,7 @@ TfLiteStatus VisitL2NormalizationOperator(DelegateData& delegateData,
// Add a L2Normalization layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddL2NormalizationLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -112,6 +115,7 @@ TfLiteStatus VisitLocalResponseNormalizationOperator(DelegateData& delegateData,
descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("NORMALIZATION",
@@ -119,6 +123,7 @@ TfLiteStatus VisitLocalResponseNormalizationOperator(DelegateData& delegateData,
IsNormalizationSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
descriptor);
@@ -132,6 +137,7 @@ TfLiteStatus VisitLocalResponseNormalizationOperator(DelegateData& delegateData,
// Add a Normalization layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddNormalizationLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Pack.hpp b/delegate/src/Pack.hpp
index 458c1744c3..57d3b460f5 100644
--- a/delegate/src/Pack.hpp
+++ b/delegate/src/Pack.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -74,6 +74,7 @@ TfLiteStatus VisitPackOperator(DelegateData& delegateData,
// Check if supported
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("STACK",
@@ -81,6 +82,7 @@ TfLiteStatus VisitPackOperator(DelegateData& delegateData,
IsStackSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputConstTensorInfos,
outputTensorInfo,
desc);
@@ -97,6 +99,7 @@ TfLiteStatus VisitPackOperator(DelegateData& delegateData,
// The TfLite Pack operator is equivalent to the ArmNN Stack operator
armnn::IConnectableLayer* layer = delegateData.m_Network->AddStackLayer(desc);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
// Connect the Constant Inputs
diff --git a/delegate/src/Pad.hpp b/delegate/src/Pad.hpp
index daedede18d..2ecf2a06d7 100644
--- a/delegate/src/Pad.hpp
+++ b/delegate/src/Pad.hpp
@@ -149,6 +149,7 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
}
}
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
bool isSupported = false;
@@ -157,6 +158,7 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
IsPadSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor);
@@ -165,6 +167,7 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor);
+ padLayer->SetBackendId(setBackend);
ARMNN_ASSERT(padLayer != nullptr);
armnn::IOutputSlot& outputSlot = padLayer->GetOutputSlot(0);
diff --git a/delegate/src/Pooling.hpp b/delegate/src/Pooling.hpp
index dfe90cb1f9..824156742d 100644
--- a/delegate/src/Pooling.hpp
+++ b/delegate/src/Pooling.hpp
@@ -84,6 +84,7 @@ TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("POOLING_2D",
@@ -91,6 +92,7 @@ TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
IsPooling2dSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor);
@@ -103,6 +105,7 @@ TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor);
+ poolingLayer->SetBackendId(setBackend);
ARMNN_ASSERT(poolingLayer != nullptr);
armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
@@ -215,12 +218,14 @@ TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
// Validate the output info.
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported) {
FORWARD_LAYER_SUPPORT_FUNC("POOLING_3D",
tfLiteContext,
IsPooling3dSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor);
@@ -234,6 +239,7 @@ TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
// Create the Layer
armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
+ poolingLayer->SetBackendId(setBackend);
ARMNN_ASSERT(poolingLayer != nullptr);
// Create and set output slots
diff --git a/delegate/src/Prelu.hpp b/delegate/src/Prelu.hpp
index 398abaf4cc..06e74ed635 100644
--- a/delegate/src/Prelu.hpp
+++ b/delegate/src/Prelu.hpp
@@ -29,6 +29,7 @@ TfLiteStatus ValidatePreluOperator(DelegateData& delegateData,
IsPreluSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo,
alphaInfo,
outputInfo);
diff --git a/delegate/src/Quantization.hpp b/delegate/src/Quantization.hpp
index 78713759fb..64f57de505 100644
--- a/delegate/src/Quantization.hpp
+++ b/delegate/src/Quantization.hpp
@@ -51,6 +51,7 @@ TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
UpdateConstantTensorOutputs(inputTensorInfo, outputTensorInfo);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("DEQUANTIZE",
@@ -58,6 +59,7 @@ TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
IsDequantizeSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo);
};
@@ -69,6 +71,7 @@ TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer();
+ dequantizeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(dequantizeLayer != nullptr);
armnn::IOutputSlot& outputSlot = dequantizeLayer->GetOutputSlot(0);
@@ -130,6 +133,7 @@ TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("QUANTIZE",
@@ -137,6 +141,7 @@ TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
IsQuantizeSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo);
};
@@ -148,6 +153,7 @@ TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer();
+ quantizeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(quantizeLayer != nullptr);
armnn::IOutputSlot& outputSlot = quantizeLayer->GetOutputSlot(0);
diff --git a/delegate/src/Redefine.hpp b/delegate/src/Redefine.hpp
index cdae719373..8f9a4e4ba0 100644
--- a/delegate/src/Redefine.hpp
+++ b/delegate/src/Redefine.hpp
@@ -44,6 +44,7 @@ TfLiteStatus VisitCastOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("CAST",
@@ -51,6 +52,7 @@ TfLiteStatus VisitCastOperator(DelegateData& delegateData,
IsCastSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo);
};
@@ -66,6 +68,7 @@ TfLiteStatus VisitCastOperator(DelegateData& delegateData,
// Add a Cast layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer();
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -210,6 +213,7 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
@@ -217,6 +221,7 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
IsReshapeSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo0,
outInfo,
reshapeDesc);
@@ -229,6 +234,7 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Reduce.hpp b/delegate/src/Reduce.hpp
index 79f2f52185..3f4c118e3d 100644
--- a/delegate/src/Reduce.hpp
+++ b/delegate/src/Reduce.hpp
@@ -105,6 +105,7 @@ TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("REDUCE",
@@ -112,6 +113,7 @@ TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
IsReduceSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
desc);
@@ -125,6 +127,7 @@ TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
// Add an Reduce layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddReduceLayer(desc);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Resize.hpp b/delegate/src/Resize.hpp
index b59006cdef..0cb15d30e4 100644
--- a/delegate/src/Resize.hpp
+++ b/delegate/src/Resize.hpp
@@ -33,6 +33,7 @@ TfLiteStatus ValidateResizeOperator(DelegateData& delegateData,
IsResizeSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo,
outputInfo,
descriptor);
diff --git a/delegate/src/Shape.hpp b/delegate/src/Shape.hpp
index 284dc9fbb7..625e6a88fb 100644
--- a/delegate/src/Shape.hpp
+++ b/delegate/src/Shape.hpp
@@ -52,6 +52,7 @@ TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("SHAPE",
@@ -59,6 +60,7 @@ TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
IsShapeSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo);
};
@@ -74,6 +76,7 @@ TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
// Add a Shape layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddShapeLayer();
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/SharedFunctions.cpp b/delegate/src/SharedFunctions.cpp
index ad5d3101a2..22f578a9d7 100644
--- a/delegate/src/SharedFunctions.cpp
+++ b/delegate/src/SharedFunctions.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -29,6 +29,7 @@ TfLiteStatus ValidateFloorOperator(DelegateData& delegateData,
IsFloorSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputTensorInfo,
outInfo);
};
diff --git a/delegate/src/Slice.hpp b/delegate/src/Slice.hpp
index cbcb45ec65..d5712aefad 100644
--- a/delegate/src/Slice.hpp
+++ b/delegate/src/Slice.hpp
@@ -99,6 +99,7 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("SLICE",
@@ -106,6 +107,7 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
IsSliceSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
descriptor);
@@ -117,8 +119,9 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- // Add a StridedSlice layer
+ // Add a Slice layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddSliceLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -129,3 +132,4 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
}
} // namespace armnnDelegate
+
diff --git a/delegate/src/Softmax.hpp b/delegate/src/Softmax.hpp
index efc1cbae16..738f542239 100644
--- a/delegate/src/Softmax.hpp
+++ b/delegate/src/Softmax.hpp
@@ -27,6 +27,7 @@ TfLiteStatus ValidateSoftmaxOperator(DelegateData& delegateData,
IsSoftmaxSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo,
outputTensorInfo,
descriptor);
@@ -46,6 +47,7 @@ TfLiteStatus ValidateLogSoftmaxOperator(DelegateData& delegateData,
IsLogSoftmaxSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo,
outputTensorInfo,
descriptor);
diff --git a/delegate/src/SpaceDepth.hpp b/delegate/src/SpaceDepth.hpp
index 593d0e7f87..2172d8678b 100644
--- a/delegate/src/SpaceDepth.hpp
+++ b/delegate/src/SpaceDepth.hpp
@@ -43,6 +43,7 @@ TfLiteStatus VisitSpaceToDepthOperator(DelegateData& delegateData,
descriptor.m_BlockSize = params->block_size;
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("SPACE_TO_DEPTH",
@@ -50,6 +51,7 @@ TfLiteStatus VisitSpaceToDepthOperator(DelegateData& delegateData,
IsSpaceToDepthSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
descriptor);
@@ -63,6 +65,7 @@ TfLiteStatus VisitSpaceToDepthOperator(DelegateData& delegateData,
// Add a SpaceToDepth layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToDepthLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -102,6 +105,7 @@ TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
descriptor.m_BlockSize = params->block_size;
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("DEPTH_TO_SPACE",
@@ -109,6 +113,7 @@ TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
IsDepthToSpaceSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
descriptor);
@@ -122,6 +127,7 @@ TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
// Add a DepthToSpace layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthToSpaceLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Split.hpp b/delegate/src/Split.hpp
index a535585699..5c094b405b 100644
--- a/delegate/src/Split.hpp
+++ b/delegate/src/Split.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -113,6 +113,7 @@ TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
splitDescriptor.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
}
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
// Check if supported
@@ -122,6 +123,7 @@ TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
IsSplitterSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfos,
splitDescriptor);
@@ -129,6 +131,7 @@ TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
@@ -305,6 +308,7 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
accumSplit += splitSize;
}
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
// Check if supported
@@ -314,6 +318,7 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
IsSplitterSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfos,
splitDescriptor);
@@ -321,6 +326,7 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
diff --git a/delegate/src/StridedSlice.hpp b/delegate/src/StridedSlice.hpp
index 515c819ffe..d2c4d5da3a 100644
--- a/delegate/src/StridedSlice.hpp
+++ b/delegate/src/StridedSlice.hpp
@@ -114,6 +114,7 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("STRIDED_SLICE",
@@ -121,6 +122,7 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
IsStridedSliceSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
descriptor);
@@ -134,6 +136,7 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
// Add a StridedSlice layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddStridedSliceLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -144,3 +147,4 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
}
} // namespace armnnDelegate
+
diff --git a/delegate/src/Transpose.hpp b/delegate/src/Transpose.hpp
index 80bb12254e..15c53101f2 100644
--- a/delegate/src/Transpose.hpp
+++ b/delegate/src/Transpose.hpp
@@ -71,7 +71,7 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
static_cast<armnn::PermutationVector::SizeType>(numEl)));
bool isSupported = false;
-
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE",
@@ -79,6 +79,7 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
IsTransposeSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo0,
outputTensorInfo,
descriptor);
@@ -91,6 +92,7 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor);
+ transposeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(transposeLayer != nullptr);
ARMNN_ASSERT(transposeLayer->GetNumInputSlots() == 1); // permutation vector given to descriptor object
diff --git a/delegate/src/UnidirectionalSequenceLstm.hpp b/delegate/src/UnidirectionalSequenceLstm.hpp
index 64ed778231..9408397587 100644
--- a/delegate/src/UnidirectionalSequenceLstm.hpp
+++ b/delegate/src/UnidirectionalSequenceLstm.hpp
@@ -253,6 +253,7 @@ TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("UNIDIRECTIONAL_SEQUENCE_LSTM",
@@ -260,6 +261,7 @@ TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData,
IsUnidirectionalSequenceLstmSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputStateInInfo,
cellStateInInfo,
@@ -277,6 +279,7 @@ TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(outputStateOutTensorInfo);
diff --git a/delegate/src/Unpack.hpp b/delegate/src/Unpack.hpp
index aaea00532f..ad541adccc 100644
--- a/delegate/src/Unpack.hpp
+++ b/delegate/src/Unpack.hpp
@@ -133,6 +133,7 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
std::vector<std::reference_wrapper<armnn::TensorInfo>> splitterOutputTensorInfos(splitterOutputs.begin(),
splitterOutputs.end());
+ armnn::BackendId setBackendSplit;
if (!delegateData.m_Network)
{
// Check if splitter is supported
@@ -142,6 +143,7 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
IsSplitterSupported,
delegateData.m_Backends,
isSupported,
+ setBackendSplit,
inputTensorInfo,
splitterOutputTensorInfos,
splitDesc);
@@ -153,6 +155,7 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
armnn::ReshapeDescriptor reshapeDescriptor;
reshapeDescriptor.m_TargetShape = outputTensorInfos[0].get().GetShape();
+ armnn::BackendId setBackendReshape;
if (!delegateData.m_Network)
{
bool isSupported = false;
@@ -161,6 +164,7 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
IsReshapeSupported,
delegateData.m_Backends,
isSupported,
+ setBackendReshape,
splitterOutputTensorInfos[0],
outputTensorInfos[0],
reshapeDescriptor);
@@ -171,6 +175,7 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
armnn::IConnectableLayer* splitterLayer = delegateData.m_Network->AddSplitterLayer(splitDesc,
splitterLayerName.c_str());
+ splitterLayer->SetBackendId(setBackendSplit);
ARMNN_ASSERT(splitterLayer != nullptr);
for (unsigned int k = 0; k < splitterLayer->GetNumOutputSlots(); ++k)
@@ -187,6 +192,7 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
std::string reshapeLayerName("Unpack Reshape");
armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor,
reshapeLayerName.c_str());
+ reshapeLayer->SetBackendId(setBackendReshape);
ARMNN_ASSERT(reshapeLayer != nullptr);
splitterLayer->GetOutputSlot(outputIndex).SetTensorInfo(splitterOutputTensorInfos[outputIndex]);
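
Unpack is the one visitor in this patch that records two backend ids, because it validates and builds two different layer types: a Splitter plus Reshape layers on its outputs. The two support checks are independent, so setBackendSplit and setBackendReshape can in principle settle on different backends, and each is applied to its own layer. A small sketch of that shape, again with stand-in types and hypothetical support checks rather than the real delegate code:

#include <string>
#include <vector>

using BackendId = std::string;   // stand-in for armnn::BackendId

struct Layer
{
    BackendId m_Backend;
    void SetBackendId(const BackendId& id) { m_Backend = id; }
};

// Hypothetical per-layer support query: the splitter and the reshape are
// checked separately, so each records its own supporting backend.
BackendId FirstSupporting(const std::vector<BackendId>& backends,
                          bool (*isSupported)(const BackendId&))
{
    for (const auto& b : backends)
    {
        if (isSupported(b))
        {
            return b;
        }
    }
    return BackendId();   // left empty when nothing supports the layer
}

int main()
{
    const std::vector<BackendId> backends = { "GpuAcc", "CpuRef" };

    const BackendId setBackendSplit =
        FirstSupporting(backends, [](const BackendId& b) { return b == "GpuAcc"; });
    const BackendId setBackendReshape =
        FirstSupporting(backends, [](const BackendId& b) { return b == "CpuRef"; });

    Layer splitterLayer;
    splitterLayer.SetBackendId(setBackendSplit);    // e.g. GpuAcc

    Layer reshapeLayer;
    reshapeLayer.SetBackendId(setBackendReshape);   // e.g. CpuRef
    return 0;
}
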