author    Cathal Corbett <cathal.corbett@arm.com>  2022-09-01 11:34:37 +0100
committer Cathal Corbett <cathal.corbett@arm.com>  2022-12-12 12:38:15 +0000
commit    5383767a7a759c867235ab66bd71f88281e3bd06 (patch)
tree      5704c33171d39dda9e4428c953e2efdd62ead656
parent    a98e79a709f7c29728e1fc79c21ba5265993b8b6 (diff)
download  armnn-5383767a7a759c867235ab66bd71f88281e3bd06.tar.gz
Optimize the calling of IsLayerSupported().
* Done as part of 22.11/23.02 innovation days.
* IsLayerSupported() is called during model prepare (delegate, android-nn-driver and shim/support_library) and again in ArmNN once model optimization is performed.
* After the first IsLayerSupported() call we already know that the layers are supported and which backend they are supported on.
* The solution is to set the BackendId of the IConnectableLayer when IsLayerSupported() is called the first time.
* In the Optimize() function we then check whether the backend is already set. If so, we do not call IsLayerSupported() again.
* If a supported layer gets optimized, the BackendId of the newly optimized layer is set to "Unknown", and IsLayerSupported() will be called on the newly optimized layer.
* Includes bug fix IVGCVSW-7213 for Android Mean FP16 CpuAcc tests. Also related to bug IVGCVSW-7211.

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I7a7820d0cdb079ffb5a3a2e0c44e252f652df53b
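To make the intended control flow concrete, the following is a minimal, self-contained sketch of the backend-assignment step described above. The types and names here (Layer, IsLayerSupportedOn, AssignBackend) are simplified stand-ins, not the actual implementation in src/armnn/Network.cpp; only the idea of reusing the BackendId recorded at model-prepare time comes from this patch.

```cpp
#include <string>
#include <vector>

// Simplified stand-in for an ArmNN layer: an empty id means "not set",
// "Unknown" means the layer was produced by an optimization and must be re-checked.
struct Layer
{
    std::string m_BackendId;
};

// Stand-in for calling IsLayerSupported() on a given backend.
bool IsLayerSupportedOn(const std::string& backend, const Layer& /*layer*/)
{
    return backend == "CpuAcc"; // placeholder decision
}

// Assign a backend to a layer, reusing the result recorded at model-prepare time.
std::string AssignBackend(Layer& layer, const std::vector<std::string>& preferences)
{
    if (!layer.m_BackendId.empty() && layer.m_BackendId != "Unknown")
    {
        // IsLayerSupported() already returned true for this backend during
        // model prepare, so the optimizer does not need to ask again.
        return layer.m_BackendId;
    }
    // New or optimized layers still go through the usual support checks.
    for (const auto& backend : preferences)
    {
        if (IsLayerSupportedOn(backend, layer))
        {
            layer.m_BackendId = backend;
            return backend;
        }
    }
    return "Unknown"; // no backend in the preference list supports this layer
}
```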
-rw-r--r--  InstallationViaAptRepository.md  2
-rw-r--r--  delegate/src/Activation.hpp  1
-rw-r--r--  delegate/src/ArgMinMax.hpp  3
-rw-r--r--  delegate/src/BatchMatMul.hpp  5
-rw-r--r--  delegate/src/BatchSpace.hpp  6
-rw-r--r--  delegate/src/Comparison.hpp  4
-rw-r--r--  delegate/src/Control.hpp  6
-rw-r--r--  delegate/src/Convolution.hpp  12
-rw-r--r--  delegate/src/DelegateUtils.hpp  15
-rw-r--r--  delegate/src/ElementwiseBinary.hpp  6
-rw-r--r--  delegate/src/ElementwiseUnary.hpp  4
-rw-r--r--  delegate/src/Fill.hpp  3
-rw-r--r--  delegate/src/FullyConnected.hpp  3
-rw-r--r--  delegate/src/Gather.hpp  5
-rw-r--r--  delegate/src/GatherNd.hpp  3
-rw-r--r--  delegate/src/LogicalBinary.hpp  3
-rw-r--r--  delegate/src/Lstm.hpp  3
-rw-r--r--  delegate/src/MultiLayerFacade.hpp  4
-rw-r--r--  delegate/src/Normalization.hpp  6
-rw-r--r--  delegate/src/Pack.hpp  5
-rw-r--r--  delegate/src/Pad.hpp  3
-rw-r--r--  delegate/src/Pooling.hpp  6
-rw-r--r--  delegate/src/Prelu.hpp  1
-rw-r--r--  delegate/src/Quantization.hpp  6
-rw-r--r--  delegate/src/Redefine.hpp  6
-rw-r--r--  delegate/src/Reduce.hpp  3
-rw-r--r--  delegate/src/Resize.hpp  1
-rw-r--r--  delegate/src/Shape.hpp  3
-rw-r--r--  delegate/src/SharedFunctions.cpp  3
-rw-r--r--  delegate/src/Slice.hpp  6
-rw-r--r--  delegate/src/Softmax.hpp  2
-rw-r--r--  delegate/src/SpaceDepth.hpp  6
-rw-r--r--  delegate/src/Split.hpp  8
-rw-r--r--  delegate/src/StridedSlice.hpp  4
-rw-r--r--  delegate/src/Transpose.hpp  4
-rw-r--r--  delegate/src/UnidirectionalSequenceLstm.hpp  3
-rw-r--r--  delegate/src/Unpack.hpp  6
-rw-r--r--  include/armnn/INetwork.hpp  14
-rw-r--r--  include/armnn/Version.hpp  2
-rw-r--r--  python/pyarmnn/README.md  6
-rw-r--r--  python/pyarmnn/examples/image_classification/README.md  2
-rw-r--r--  python/pyarmnn/examples/keyword_spotting/README.md  2
-rw-r--r--  python/pyarmnn/examples/object_detection/README.md  2
-rw-r--r--  python/pyarmnn/examples/speech_recognition/README.md  2
-rw-r--r--  python/pyarmnn/src/pyarmnn/_version.py  4
-rw-r--r--  python/pyarmnn/test/test_setup.py  8
-rw-r--r--  python/pyarmnn/test/test_version.py  4
-rw-r--r--  shim/sl/canonical/ConversionUtils.cpp  18
-rw-r--r--  shim/sl/canonical/ConversionUtils.hpp  16
-rw-r--r--  shim/sl/canonical/Converter.cpp  171
-rw-r--r--  src/armnn/Layer.hpp  4
-rw-r--r--  src/armnn/Network.cpp  61
52 files changed, 428 insertions, 58 deletions
diff --git a/InstallationViaAptRepository.md b/InstallationViaAptRepository.md
index 037e5cc7f1..fac714f2a6 100644
--- a/InstallationViaAptRepository.md
+++ b/InstallationViaAptRepository.md
@@ -117,7 +117,7 @@ The easiest way to install all of the available packages for your systems archit
sudo apt-get install -y python3-pyarmnn armnn-latest-all
# Verify installation via python:
python3 -c "import pyarmnn as ann;print(ann.GetVersion())"
- # Returns '{ARMNN_MAJOR_VERSION}.0.0' e.g. 31.0.0
+ # Returns '{ARMNN_MAJOR_VERSION}.0.0' e.g. 32.0.0
```
This will install PyArmNN and the three backends for Neon (CpuAcc), OpenCL (GpuAcc) and our Reference Backend.
It will also install their dependencies including the arm-compute-library package along with the Tensorflow Lite Parser
diff --git a/delegate/src/Activation.hpp b/delegate/src/Activation.hpp
index 0071873d16..3560bfdae7 100644
--- a/delegate/src/Activation.hpp
+++ b/delegate/src/Activation.hpp
@@ -29,6 +29,7 @@ TfLiteStatus ValidateActivationOperator(DelegateData& delegateData,
IsActivationSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo,
outputInfo,
activationDesc);
diff --git a/delegate/src/ArgMinMax.hpp b/delegate/src/ArgMinMax.hpp
index 057dc8ba0a..dd28807f67 100644
--- a/delegate/src/ArgMinMax.hpp
+++ b/delegate/src/ArgMinMax.hpp
@@ -91,6 +91,7 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("ARGMINMAX",
@@ -98,6 +99,7 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
IsArgMinMaxSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
desc);
@@ -111,6 +113,7 @@ TfLiteStatus VisitArgMinMaxOperator(DelegateData& delegateData,
// Add an ArgMinMax layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddArgMinMaxLayer(desc);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/BatchMatMul.hpp b/delegate/src/BatchMatMul.hpp
index 391301e4d7..3b884a092f 100644
--- a/delegate/src/BatchMatMul.hpp
+++ b/delegate/src/BatchMatMul.hpp
@@ -68,6 +68,7 @@ namespace armnnDelegate
// Check if supported
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("BATCH_MATMUL",
@@ -75,6 +76,7 @@ namespace armnnDelegate
IsBatchMatMulSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
armnnLHSInputTensorInfo,
armnnRHSInputTensorInfo,
outputTensorInfo,
@@ -88,6 +90,7 @@ namespace armnnDelegate
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchMatMulLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -96,4 +99,4 @@ namespace armnnDelegate
return kTfLiteOk;
}
-} // namespace armnnDelegate
\ No newline at end of file
+} // namespace armnnDelegate
diff --git a/delegate/src/BatchSpace.hpp b/delegate/src/BatchSpace.hpp
index 847d6f15d2..903fe37eae 100644
--- a/delegate/src/BatchSpace.hpp
+++ b/delegate/src/BatchSpace.hpp
@@ -72,6 +72,7 @@ TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
// Check if supported
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("BATCH_TO_SPACE_ND",
@@ -79,6 +80,7 @@ TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
IsBatchToSpaceNdSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor);
@@ -95,6 +97,7 @@ TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
// Add a BatchToSpace layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -163,6 +166,7 @@ TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
// Check if supported
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("SPACE_TO_BATCH_ND",
@@ -170,6 +174,7 @@ TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
IsSpaceToBatchNdSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor);
@@ -186,6 +191,7 @@ TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
// Add a SpaceToBatch layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToBatchNdLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Comparison.hpp b/delegate/src/Comparison.hpp
index 8bf53c71ef..ee121e3c5c 100644
--- a/delegate/src/Comparison.hpp
+++ b/delegate/src/Comparison.hpp
@@ -88,7 +88,7 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
armnn::ComparisonDescriptor descriptor(comparisonOperation);
bool isSupported = false;
-
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("COMPARISON",
@@ -96,6 +96,7 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
IsComparisonSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo0,
inputTensorInfo1,
outputTensorInfo,
@@ -109,6 +110,7 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* comparisonLayer = delegateData.m_Network->AddComparisonLayer(descriptor);
+ comparisonLayer->SetBackendId(setBackend);
ARMNN_ASSERT(comparisonLayer != nullptr);
armnn::IOutputSlot& outputSlot = comparisonLayer->GetOutputSlot(0);
diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp
index f04245bcb6..02426a5616 100644
--- a/delegate/src/Control.hpp
+++ b/delegate/src/Control.hpp
@@ -119,6 +119,7 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
// Check if supported
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("CONCATENATION",
@@ -126,6 +127,7 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
IsConcatSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputConstTensorInfos,
outputTensorInfo,
concatDescriptor);
@@ -139,6 +141,7 @@ TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
// Setup layer and connect.
armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor);
+ concatenationLayer->SetBackendId(setBackend);
ARMNN_ASSERT(concatenationLayer != nullptr);
// Connect the Constant Inputs
@@ -258,6 +261,7 @@ TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
// Check if supported
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("MEAN",
@@ -265,6 +269,7 @@ TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
IsMeanSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
desc);
@@ -278,6 +283,7 @@ TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
// Setup layer and connect.
armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc);
+ meanLayer->SetBackendId(setBackend);
ARMNN_ASSERT(meanLayer != nullptr);
armnn::IOutputSlot& outputSlot = meanLayer->GetOutputSlot(0);
diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp
index 93da4c8ce2..e307bb9be3 100644
--- a/delegate/src/Convolution.hpp
+++ b/delegate/src/Convolution.hpp
@@ -144,6 +144,7 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
bool isSupported = false;
@@ -152,6 +153,7 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
IsConvolution2dSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor,
@@ -162,6 +164,7 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
// Set up filter and biases
armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
+ layer->SetBackendId(setBackend);
if(tflite::IsConstantTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]]))
{
@@ -300,6 +303,7 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
// If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
// support for the operator
// If supported, VisitConvolutionOperator will be called again to add the layer to the network as seen below.
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
bool isSupported = false;
@@ -308,6 +312,7 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
IsConvolution3dSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor,
@@ -317,6 +322,7 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
// Add a constant layer for weights and biases if inputs are constant,
@@ -497,6 +503,7 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
}
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
bool isSupported = false;
@@ -505,6 +512,7 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
IsDepthwiseConvolutionSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor,
@@ -514,6 +522,7 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
+ layer->SetBackendId(setBackend);
if(tflite::IsConstantTensor(&tfLiteFilterTensor))
{
@@ -699,6 +708,7 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
filterTensorInfo,
armnn::Optional<armnn::PermutationVector&>());
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
bool isSupported = false;
@@ -707,6 +717,7 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
IsTransposeConvolution2dSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor,
@@ -718,6 +729,7 @@ TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
filterTensor,
armnn::EmptyOptional());
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 58d8048be3..850b279fea 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -25,7 +25,7 @@ namespace
{
// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
-#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, ...) \
+#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
try \
{ \
for (auto&& backendId : backends) \
@@ -38,6 +38,7 @@ try \
layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
if (supported) \
{ \
+ setBackend = backendId; \
break; \
} \
else \
@@ -224,11 +225,13 @@ armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
armnn::ReshapeDescriptor reshapeDescriptor;
reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
tfLiteContext,
IsReshapeSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
smallInfo,
reshapedInfo,
reshapeDescriptor);
@@ -240,6 +243,7 @@ armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
ARMNN_ASSERT(delegateData.m_Network != nullptr);
// Add Reshape layer
armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
+ reshapeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(reshapeLayer != nullptr);
reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
@@ -331,11 +335,13 @@ TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
tfLiteContext,
IsActivationSupported,
data.m_Backends,
isSupported,
+ setBackend,
prevLayer->GetOutputSlot(0).GetTensorInfo(),
activationOutputInfo,
activationDesc);
@@ -344,6 +350,7 @@ TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
return kTfLiteError;
}
armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
+ activationLayer->SetBackendId(setBackend);
ARMNN_ASSERT(activationLayer != nullptr);
activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);
@@ -566,11 +573,13 @@ TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
{
IgnoreUnused(layer);
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
tfLiteContext,
IsConstantSupported,
data.m_Backends,
isSupported,
+ setBackend,
constTensorInfo);
if (!isSupported)
{
@@ -581,6 +590,7 @@ TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
constTensorInfo,
armnn::Optional<armnn::PermutationVector&>());
armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
+ constantLayer->SetBackendId(setBackend);
armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(constTensorInfo);
@@ -615,11 +625,13 @@ TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
{
armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
tfLiteContext,
IsConstantSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo);
if (!isSupported)
{
@@ -629,6 +641,7 @@ TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
inputTensorInfo,
armnn::Optional<armnn::PermutationVector&>());
armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
+ constantLayer->SetBackendId(setBackend);
armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(inputTensorInfo);
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index 6e81db4b4f..caf02624be 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -32,6 +32,7 @@ TfLiteStatus ValidateAddOperator(DelegateData& delegateData,
IsAdditionSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo1,
inputInfo2,
outputTensorInfo);
@@ -56,6 +57,7 @@ TfLiteStatus ValidateDivOperator(DelegateData& delegateData,
IsDivisionSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo1,
inputInfo2,
outputTensorInfo);
@@ -108,6 +110,7 @@ TfLiteStatus ValidateMaximumOperator(DelegateData& delegateData,
IsMaximumSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo1,
inputInfo2,
outputTensorInfo);
@@ -131,6 +134,7 @@ TfLiteStatus ValidateMinimumOperator(DelegateData& delegateData,
IsMinimumSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo1,
inputInfo2,
outputTensorInfo);
@@ -154,6 +158,7 @@ TfLiteStatus ValidateMulOperator(DelegateData& delegateData,
IsMultiplicationSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo1,
inputInfo2,
outputTensorInfo);
@@ -177,6 +182,7 @@ TfLiteStatus ValidateSubOperator(DelegateData& delegateData,
IsSubtractionSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo1,
inputInfo2,
outputTensorInfo);
diff --git a/delegate/src/ElementwiseUnary.hpp b/delegate/src/ElementwiseUnary.hpp
index 79d7f82249..947e531162 100644
--- a/delegate/src/ElementwiseUnary.hpp
+++ b/delegate/src/ElementwiseUnary.hpp
@@ -51,7 +51,7 @@ TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
armnn::ElementwiseUnaryDescriptor descriptor(unaryOperation);
bool isSupported = false;
-
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("ELEMENTWISE_UNARY",
@@ -59,6 +59,7 @@ TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
IsElementwiseUnarySupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor);
@@ -71,6 +72,7 @@ TfLiteStatus VisitElementwiseUnaryOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddElementwiseUnaryLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Fill.hpp b/delegate/src/Fill.hpp
index dc30e53ba9..e79133e15c 100644
--- a/delegate/src/Fill.hpp
+++ b/delegate/src/Fill.hpp
@@ -72,6 +72,7 @@ TfLiteStatus VisitFillOperator(DelegateData& delegateData,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("FILL",
@@ -79,6 +80,7 @@ TfLiteStatus VisitFillOperator(DelegateData& delegateData,
IsFillSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
descriptor);
@@ -91,6 +93,7 @@ TfLiteStatus VisitFillOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddFillLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp
index 6677ab900e..a2960e299b 100644
--- a/delegate/src/FullyConnected.hpp
+++ b/delegate/src/FullyConnected.hpp
@@ -110,6 +110,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
descriptor.m_ConstantWeights = isConstantWeights;
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("FULLY_CONNECTED",
@@ -117,6 +118,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
IsFullyConnectedSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
reshapedTensorInfo,
outputTensorInfo,
weightsTensorInfo,
@@ -131,6 +133,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddFullyConnectedLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
// Add a constant layer for weights and biases if inputs are constant.
diff --git a/delegate/src/Gather.hpp b/delegate/src/Gather.hpp
index 616de7e09e..9e98966471 100644
--- a/delegate/src/Gather.hpp
+++ b/delegate/src/Gather.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -69,6 +69,7 @@ TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
return kTfLiteError;
}
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
// Check if supported
@@ -78,6 +79,7 @@ TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
IsGatherSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
indicesTensorInfo,
outputTensorInfo,
@@ -86,6 +88,7 @@ TfLiteStatus VisitGatherOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherLayer(gatherDescriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
diff --git a/delegate/src/GatherNd.hpp b/delegate/src/GatherNd.hpp
index 1e12c5cf68..f2192f77c3 100644
--- a/delegate/src/GatherNd.hpp
+++ b/delegate/src/GatherNd.hpp
@@ -46,6 +46,7 @@ TfLiteStatus VisitGatherNdOperator(DelegateData& delegateData,
const armnn::TensorInfo& indicesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteIndicesTensor);
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
// Check if supported
@@ -55,6 +56,7 @@ TfLiteStatus VisitGatherNdOperator(DelegateData& delegateData,
IsGatherNdSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
indicesTensorInfo,
outputTensorInfo);
@@ -62,6 +64,7 @@ TfLiteStatus VisitGatherNdOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddGatherNdLayer();
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
diff --git a/delegate/src/LogicalBinary.hpp b/delegate/src/LogicalBinary.hpp
index 562b5d3438..b6a8f5d5f6 100644
--- a/delegate/src/LogicalBinary.hpp
+++ b/delegate/src/LogicalBinary.hpp
@@ -52,6 +52,7 @@ TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
// Check if supported
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("LOGICAL_BINARY",
@@ -59,6 +60,7 @@ TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
IsLogicalBinarySupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo0,
inputTensorInfo1,
outputTensorInfo,
@@ -72,6 +74,7 @@ TfLiteStatus VisitLogicalBinaryOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* logicalBinaryLayer = delegateData.m_Network->AddLogicalBinaryLayer(desc);
+ logicalBinaryLayer->SetBackendId(setBackend);
ARMNN_ASSERT(logicalBinaryLayer != nullptr);
armnn::IOutputSlot& outputSlot = logicalBinaryLayer->GetOutputSlot(0);
diff --git a/delegate/src/Lstm.hpp b/delegate/src/Lstm.hpp
index 253cd2162d..8c1f877ec9 100644
--- a/delegate/src/Lstm.hpp
+++ b/delegate/src/Lstm.hpp
@@ -216,6 +216,7 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("LSTM",
@@ -223,6 +224,7 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData,
IsLstmSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputStateInInfo,
cellStateInInfo,
@@ -241,6 +243,7 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddLstmLayer(desc, params);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(scratchBufferTensorInfo);
diff --git a/delegate/src/MultiLayerFacade.hpp b/delegate/src/MultiLayerFacade.hpp
index 2fdfc7082a..aa00be8f60 100644
--- a/delegate/src/MultiLayerFacade.hpp
+++ b/delegate/src/MultiLayerFacade.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -117,6 +117,8 @@ public:
virtual const armnn::BaseDescriptor& GetParameters() const override { return m_NullDescriptor; }
+ void SetBackendId(const armnn::BackendId& id) override {}
+
protected:
/// Retrieve the handles to the constant values stored by the layer.
/// @return A vector of the constant tensors stored by this layer.
diff --git a/delegate/src/Normalization.hpp b/delegate/src/Normalization.hpp
index 0933552973..d0db43ea7c 100644
--- a/delegate/src/Normalization.hpp
+++ b/delegate/src/Normalization.hpp
@@ -42,6 +42,7 @@ TfLiteStatus VisitL2NormalizationOperator(DelegateData& delegateData,
descriptor.m_DataLayout = armnn::DataLayout::NHWC;
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("L2_NORMALIZATION",
@@ -49,6 +50,7 @@ TfLiteStatus VisitL2NormalizationOperator(DelegateData& delegateData,
IsL2NormalizationSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
descriptor);
@@ -62,6 +64,7 @@ TfLiteStatus VisitL2NormalizationOperator(DelegateData& delegateData,
// Add a L2Normalization layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddL2NormalizationLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -112,6 +115,7 @@ TfLiteStatus VisitLocalResponseNormalizationOperator(DelegateData& delegateData,
descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("NORMALIZATION",
@@ -119,6 +123,7 @@ TfLiteStatus VisitLocalResponseNormalizationOperator(DelegateData& delegateData,
IsNormalizationSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
descriptor);
@@ -132,6 +137,7 @@ TfLiteStatus VisitLocalResponseNormalizationOperator(DelegateData& delegateData,
// Add a Normalization layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddNormalizationLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Pack.hpp b/delegate/src/Pack.hpp
index 458c1744c3..57d3b460f5 100644
--- a/delegate/src/Pack.hpp
+++ b/delegate/src/Pack.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -74,6 +74,7 @@ TfLiteStatus VisitPackOperator(DelegateData& delegateData,
// Check if supported
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("STACK",
@@ -81,6 +82,7 @@ TfLiteStatus VisitPackOperator(DelegateData& delegateData,
IsStackSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputConstTensorInfos,
outputTensorInfo,
desc);
@@ -97,6 +99,7 @@ TfLiteStatus VisitPackOperator(DelegateData& delegateData,
// The TfLite Pack operator is equivalent to the ArmNN Stack operator
armnn::IConnectableLayer* layer = delegateData.m_Network->AddStackLayer(desc);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
// Connect the Constant Inputs
diff --git a/delegate/src/Pad.hpp b/delegate/src/Pad.hpp
index daedede18d..2ecf2a06d7 100644
--- a/delegate/src/Pad.hpp
+++ b/delegate/src/Pad.hpp
@@ -149,6 +149,7 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
}
}
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
bool isSupported = false;
@@ -157,6 +158,7 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
IsPadSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor);
@@ -165,6 +167,7 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor);
+ padLayer->SetBackendId(setBackend);
ARMNN_ASSERT(padLayer != nullptr);
armnn::IOutputSlot& outputSlot = padLayer->GetOutputSlot(0);
diff --git a/delegate/src/Pooling.hpp b/delegate/src/Pooling.hpp
index dfe90cb1f9..824156742d 100644
--- a/delegate/src/Pooling.hpp
+++ b/delegate/src/Pooling.hpp
@@ -84,6 +84,7 @@ TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("POOLING_2D",
@@ -91,6 +92,7 @@ TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
IsPooling2dSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor);
@@ -103,6 +105,7 @@ TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor);
+ poolingLayer->SetBackendId(setBackend);
ARMNN_ASSERT(poolingLayer != nullptr);
armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
@@ -215,12 +218,14 @@ TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
// Validate the output info.
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported) {
FORWARD_LAYER_SUPPORT_FUNC("POOLING_3D",
tfLiteContext,
IsPooling3dSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo,
descriptor);
@@ -234,6 +239,7 @@ TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
// Create the Layer
armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
+ poolingLayer->SetBackendId(setBackend);
ARMNN_ASSERT(poolingLayer != nullptr);
// Create and set output slots
diff --git a/delegate/src/Prelu.hpp b/delegate/src/Prelu.hpp
index 398abaf4cc..06e74ed635 100644
--- a/delegate/src/Prelu.hpp
+++ b/delegate/src/Prelu.hpp
@@ -29,6 +29,7 @@ TfLiteStatus ValidatePreluOperator(DelegateData& delegateData,
IsPreluSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo,
alphaInfo,
outputInfo);
diff --git a/delegate/src/Quantization.hpp b/delegate/src/Quantization.hpp
index 78713759fb..64f57de505 100644
--- a/delegate/src/Quantization.hpp
+++ b/delegate/src/Quantization.hpp
@@ -51,6 +51,7 @@ TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
UpdateConstantTensorOutputs(inputTensorInfo, outputTensorInfo);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("DEQUANTIZE",
@@ -58,6 +59,7 @@ TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
IsDequantizeSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo);
};
@@ -69,6 +71,7 @@ TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer();
+ dequantizeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(dequantizeLayer != nullptr);
armnn::IOutputSlot& outputSlot = dequantizeLayer->GetOutputSlot(0);
@@ -130,6 +133,7 @@ TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("QUANTIZE",
@@ -137,6 +141,7 @@ TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
IsQuantizeSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfo);
};
@@ -148,6 +153,7 @@ TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer();
+ quantizeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(quantizeLayer != nullptr);
armnn::IOutputSlot& outputSlot = quantizeLayer->GetOutputSlot(0);
diff --git a/delegate/src/Redefine.hpp b/delegate/src/Redefine.hpp
index cdae719373..8f9a4e4ba0 100644
--- a/delegate/src/Redefine.hpp
+++ b/delegate/src/Redefine.hpp
@@ -44,6 +44,7 @@ TfLiteStatus VisitCastOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("CAST",
@@ -51,6 +52,7 @@ TfLiteStatus VisitCastOperator(DelegateData& delegateData,
IsCastSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo);
};
@@ -66,6 +68,7 @@ TfLiteStatus VisitCastOperator(DelegateData& delegateData,
// Add a Cast layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer();
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -210,6 +213,7 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
@@ -217,6 +221,7 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
IsReshapeSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo0,
outInfo,
reshapeDesc);
@@ -229,6 +234,7 @@ TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Reduce.hpp b/delegate/src/Reduce.hpp
index 79f2f52185..3f4c118e3d 100644
--- a/delegate/src/Reduce.hpp
+++ b/delegate/src/Reduce.hpp
@@ -105,6 +105,7 @@ TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("REDUCE",
@@ -112,6 +113,7 @@ TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
IsReduceSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
desc);
@@ -125,6 +127,7 @@ TfLiteStatus VisitReduceOperator(DelegateData& delegateData,
// Add an Reduce layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddReduceLayer(desc);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Resize.hpp b/delegate/src/Resize.hpp
index b59006cdef..0cb15d30e4 100644
--- a/delegate/src/Resize.hpp
+++ b/delegate/src/Resize.hpp
@@ -33,6 +33,7 @@ TfLiteStatus ValidateResizeOperator(DelegateData& delegateData,
IsResizeSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo,
outputInfo,
descriptor);
diff --git a/delegate/src/Shape.hpp b/delegate/src/Shape.hpp
index 284dc9fbb7..625e6a88fb 100644
--- a/delegate/src/Shape.hpp
+++ b/delegate/src/Shape.hpp
@@ -52,6 +52,7 @@ TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("SHAPE",
@@ -59,6 +60,7 @@ TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
IsShapeSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo);
};
@@ -74,6 +76,7 @@ TfLiteStatus VisitShapeOperator(DelegateData& delegateData,
// Add a Shape layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddShapeLayer();
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/SharedFunctions.cpp b/delegate/src/SharedFunctions.cpp
index ad5d3101a2..22f578a9d7 100644
--- a/delegate/src/SharedFunctions.cpp
+++ b/delegate/src/SharedFunctions.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -29,6 +29,7 @@ TfLiteStatus ValidateFloorOperator(DelegateData& delegateData,
IsFloorSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputTensorInfo,
outInfo);
};
diff --git a/delegate/src/Slice.hpp b/delegate/src/Slice.hpp
index cbcb45ec65..d5712aefad 100644
--- a/delegate/src/Slice.hpp
+++ b/delegate/src/Slice.hpp
@@ -99,6 +99,7 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("SLICE",
@@ -106,6 +107,7 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
IsSliceSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
descriptor);
@@ -117,8 +119,9 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
return isSupported ? kTfLiteOk : kTfLiteError;
}
- // Add a StridedSlice layer
+ // Add a Slice layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddSliceLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -129,3 +132,4 @@ TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
}
} // namespace armnnDelegate
+
diff --git a/delegate/src/Softmax.hpp b/delegate/src/Softmax.hpp
index efc1cbae16..738f542239 100644
--- a/delegate/src/Softmax.hpp
+++ b/delegate/src/Softmax.hpp
@@ -27,6 +27,7 @@ TfLiteStatus ValidateSoftmaxOperator(DelegateData& delegateData,
IsSoftmaxSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo,
outputTensorInfo,
descriptor);
@@ -46,6 +47,7 @@ TfLiteStatus ValidateLogSoftmaxOperator(DelegateData& delegateData,
IsLogSoftmaxSupported,
delegateData.m_Backends,
isSupported,
+ armnn::BackendId(),
inputInfo,
outputTensorInfo,
descriptor);
diff --git a/delegate/src/SpaceDepth.hpp b/delegate/src/SpaceDepth.hpp
index 593d0e7f87..2172d8678b 100644
--- a/delegate/src/SpaceDepth.hpp
+++ b/delegate/src/SpaceDepth.hpp
@@ -43,6 +43,7 @@ TfLiteStatus VisitSpaceToDepthOperator(DelegateData& delegateData,
descriptor.m_BlockSize = params->block_size;
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("SPACE_TO_DEPTH",
@@ -50,6 +51,7 @@ TfLiteStatus VisitSpaceToDepthOperator(DelegateData& delegateData,
IsSpaceToDepthSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
descriptor);
@@ -63,6 +65,7 @@ TfLiteStatus VisitSpaceToDepthOperator(DelegateData& delegateData,
// Add a SpaceToDepth layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToDepthLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -102,6 +105,7 @@ TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
descriptor.m_BlockSize = params->block_size;
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("DEPTH_TO_SPACE",
@@ -109,6 +113,7 @@ TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
IsDepthToSpaceSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
descriptor);
@@ -122,6 +127,7 @@ TfLiteStatus VisitDepthToSpaceOperator(DelegateData& delegateData,
// Add a DepthToSpace layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthToSpaceLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
diff --git a/delegate/src/Split.hpp b/delegate/src/Split.hpp
index a535585699..5c094b405b 100644
--- a/delegate/src/Split.hpp
+++ b/delegate/src/Split.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -113,6 +113,7 @@ TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
splitDescriptor.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
}
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
// Check if supported
@@ -122,6 +123,7 @@ TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
IsSplitterSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfos,
splitDescriptor);
@@ -129,6 +131,7 @@ TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
@@ -305,6 +308,7 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
accumSplit += splitSize;
}
+ armnn::BackendId setBackend;
if (!delegateData.m_Network)
{
// Check if supported
@@ -314,6 +318,7 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
IsSplitterSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputTensorInfos,
splitDescriptor);
@@ -321,6 +326,7 @@ TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
diff --git a/delegate/src/StridedSlice.hpp b/delegate/src/StridedSlice.hpp
index 515c819ffe..d2c4d5da3a 100644
--- a/delegate/src/StridedSlice.hpp
+++ b/delegate/src/StridedSlice.hpp
@@ -114,6 +114,7 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("STRIDED_SLICE",
@@ -121,6 +122,7 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
IsStridedSliceSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outInfo,
descriptor);
@@ -134,6 +136,7 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
// Add a StridedSlice layer
armnn::IConnectableLayer* layer = delegateData.m_Network->AddStridedSliceLayer(descriptor);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
@@ -144,3 +147,4 @@ TfLiteStatus VisitStridedSliceOperator(DelegateData& delegateData,
}
} // namespace armnnDelegate
+
diff --git a/delegate/src/Transpose.hpp b/delegate/src/Transpose.hpp
index 80bb12254e..15c53101f2 100644
--- a/delegate/src/Transpose.hpp
+++ b/delegate/src/Transpose.hpp
@@ -71,7 +71,7 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
static_cast<armnn::PermutationVector::SizeType>(numEl)));
bool isSupported = false;
-
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE",
@@ -79,6 +79,7 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
IsTransposeSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo0,
outputTensorInfo,
descriptor);
@@ -91,6 +92,7 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor);
+ transposeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(transposeLayer != nullptr);
ARMNN_ASSERT(transposeLayer->GetNumInputSlots() == 1); // permutation vector given to descriptor object
diff --git a/delegate/src/UnidirectionalSequenceLstm.hpp b/delegate/src/UnidirectionalSequenceLstm.hpp
index 64ed778231..9408397587 100644
--- a/delegate/src/UnidirectionalSequenceLstm.hpp
+++ b/delegate/src/UnidirectionalSequenceLstm.hpp
@@ -253,6 +253,7 @@ TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC("UNIDIRECTIONAL_SEQUENCE_LSTM",
@@ -260,6 +261,7 @@ TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData,
IsUnidirectionalSequenceLstmSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo,
outputStateInInfo,
cellStateInInfo,
@@ -277,6 +279,7 @@ TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData,
}
armnn::IConnectableLayer* layer = delegateData.m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(outputStateOutTensorInfo);
diff --git a/delegate/src/Unpack.hpp b/delegate/src/Unpack.hpp
index aaea00532f..ad541adccc 100644
--- a/delegate/src/Unpack.hpp
+++ b/delegate/src/Unpack.hpp
@@ -133,6 +133,7 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
std::vector<std::reference_wrapper<armnn::TensorInfo>> splitterOutputTensorInfos(splitterOutputs.begin(),
splitterOutputs.end());
+ armnn::BackendId setBackendSplit;
if (!delegateData.m_Network)
{
// Check if splitter is supported
@@ -142,6 +143,7 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
IsSplitterSupported,
delegateData.m_Backends,
isSupported,
+ setBackendSplit,
inputTensorInfo,
splitterOutputTensorInfos,
splitDesc);
@@ -153,6 +155,7 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
armnn::ReshapeDescriptor reshapeDescriptor;
reshapeDescriptor.m_TargetShape = outputTensorInfos[0].get().GetShape();
+ armnn::BackendId setBackendReshape;
if (!delegateData.m_Network)
{
bool isSupported = false;
@@ -161,6 +164,7 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
IsReshapeSupported,
delegateData.m_Backends,
isSupported,
+ setBackendReshape,
splitterOutputTensorInfos[0],
outputTensorInfos[0],
reshapeDescriptor);
@@ -171,6 +175,7 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
armnn::IConnectableLayer* splitterLayer = delegateData.m_Network->AddSplitterLayer(splitDesc,
splitterLayerName.c_str());
+ splitterLayer->SetBackendId(setBackendSplit);
ARMNN_ASSERT(splitterLayer != nullptr);
for (unsigned int k = 0; k < splitterLayer->GetNumOutputSlots(); ++k)
@@ -187,6 +192,7 @@ TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
std::string reshapeLayerName("Unpack Reshape");
armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor,
reshapeLayerName.c_str());
+ reshapeLayer->SetBackendId(setBackendReshape);
ARMNN_ASSERT(reshapeLayer != nullptr);
splitterLayer->GetOutputSlot(outputIndex).SetTensorInfo(splitterOutputTensorInfos[outputIndex]);
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 2bb9ad91f3..c9c8a04656 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -98,7 +98,11 @@ public:
/// Apply a visitor to this layer
virtual void ExecuteStrategy(IStrategy& strategy) const = 0;
- /// Provide a hint for the optimizer as to which backend to prefer for this layer
+ /// Provide a hint for the optimizer as to which backend to prefer for this layer.
+ /// By providing a BackendSelectionHint there is no guarantee the input backend supports that layer.
+ /// If IsLayerSupported() returns false with the backend hint, we default to calling IsLayerSupported()
+ /// on the BackendPreferences vector. Use SetBackendId() if we can guarantee a backend supports that
+ /// layer (IsLayerSupported returns true for a specific backend).
virtual void BackendSelectionHint(Optional<BackendId> backend) = 0;
/// Returns the armnn::LayerType of this layer
@@ -111,6 +115,12 @@ public:
/// the BaseDescriptor IsNull function is invoked.
virtual const BaseDescriptor& GetParameters() const = 0;
+ /// Set the backend of the IConnectableLayer.
+ /// By using SetBackendId() we guarantee that the input backend supports that
+ /// layer (IsLayerSupported returns true for a specific backend). If there is
+ /// no guarantee the input backend supports that layer use BackendSelectionHint().
+ virtual void SetBackendId(const BackendId& id) = 0;
+
using ConstantTensors = std::vector<std::reference_wrapper<std::shared_ptr<ConstTensorHandle>>>;
// Returns ConstantTensors of this Layer if it has any, otherwise returns empty vector.
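As a usage note for the INetwork.hpp changes above: the snippet below is a hypothetical caller-side sketch (the helper name RecordBackendChoice and the isSupportedOnBackend flag are illustrative); it only assumes the SetBackendId() and BackendSelectionHint() members declared in this header.

```cpp
#include <armnn/BackendId.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/Optional.hpp>

// Hypothetical helper: record the outcome of an IsLayerSupported() check on a layer.
void RecordBackendChoice(armnn::IConnectableLayer* layer,
                         const armnn::BackendId& backend,
                         bool isSupportedOnBackend)
{
    if (isSupportedOnBackend)
    {
        // Support is guaranteed, so the optimizer can skip re-validating this layer.
        layer->SetBackendId(backend);
    }
    else
    {
        // Only a preference: the optimizer will still run its own support checks.
        layer->BackendSelectionHint(armnn::Optional<armnn::BackendId>(backend));
    }
}
```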
diff --git a/include/armnn/Version.hpp b/include/armnn/Version.hpp
index 7fdb20ade5..aedd4a0c0b 100644
--- a/include/armnn/Version.hpp
+++ b/include/armnn/Version.hpp
@@ -10,7 +10,7 @@
#define STRINGIFY_MACRO(s) #s
// ArmNN version components
-#define ARMNN_MAJOR_VERSION 31
+#define ARMNN_MAJOR_VERSION 32
#define ARMNN_MINOR_VERSION 0
#define ARMNN_PATCH_VERSION 0
diff --git a/python/pyarmnn/README.md b/python/pyarmnn/README.md
index 0d2c511a17..3962e11395 100644
--- a/python/pyarmnn/README.md
+++ b/python/pyarmnn/README.md
@@ -69,8 +69,8 @@ PyArmNN can be distributed as a source package or a binary package (wheel).
Binary package is platform dependent, the name of the package will indicate the platform it was built for, e.g.:
-* Linux x86 64bit machine: pyarmnn-31.0.0-cp36-cp36m-*linux_x86_64*.whl
-* Linux Aarch 64 bit machine: pyarmnn-31.0.0-cp36-cp36m-*linux_aarch64*.whl
+* Linux x86 64bit machine: pyarmnn-32.0.0-cp36-cp36m-*linux_x86_64*.whl
+* Linux Aarch 64 bit machine: pyarmnn-32.0.0-cp36-cp36m-*linux_aarch64*.whl
The source package is platform independent but installation involves compilation of Arm NN python extension. You will need to have g++ compatible with C++ 14 standard and a python development library installed on the build machine.
@@ -110,7 +110,7 @@ $ pip show pyarmnn
You can also verify it by running the following and getting output similar to below:
```bash
$ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'31.0.0'
+'32.0.0'
```
# PyArmNN API overview
diff --git a/python/pyarmnn/examples/image_classification/README.md b/python/pyarmnn/examples/image_classification/README.md
index 04718e2bf4..fa0f89eba0 100644
--- a/python/pyarmnn/examples/image_classification/README.md
+++ b/python/pyarmnn/examples/image_classification/README.md
@@ -20,7 +20,7 @@ $ pip show pyarmnn
You can also verify it by running the following and getting output similar to below:
```bash
$ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'31.0.0'
+'32.0.0'
```
##### Dependencies
diff --git a/python/pyarmnn/examples/keyword_spotting/README.md b/python/pyarmnn/examples/keyword_spotting/README.md
index 98158e6c03..905cae1070 100644
--- a/python/pyarmnn/examples/keyword_spotting/README.md
+++ b/python/pyarmnn/examples/keyword_spotting/README.md
@@ -18,7 +18,7 @@ You can also verify it by running the following and getting output similar to be
```bash
$ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'31.0.0'
+'32.0.0'
```
### Dependencies
diff --git a/python/pyarmnn/examples/object_detection/README.md b/python/pyarmnn/examples/object_detection/README.md
index 73bafb6038..3c4b100fd4 100644
--- a/python/pyarmnn/examples/object_detection/README.md
+++ b/python/pyarmnn/examples/object_detection/README.md
@@ -54,7 +54,7 @@ $ pip show pyarmnn
You can also verify it by running the following and getting output similar to below:
```bash
$ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'31.0.0'
+'32.0.0'
```
##### Dependencies
diff --git a/python/pyarmnn/examples/speech_recognition/README.md b/python/pyarmnn/examples/speech_recognition/README.md
index e442aad591..af0196f4bf 100644
--- a/python/pyarmnn/examples/speech_recognition/README.md
+++ b/python/pyarmnn/examples/speech_recognition/README.md
@@ -18,7 +18,7 @@ You can also verify it by running the following and getting output similar to be
```bash
$ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'31.0.0'
+'32.0.0'
```
### Dependencies
diff --git a/python/pyarmnn/src/pyarmnn/_version.py b/python/pyarmnn/src/pyarmnn/_version.py
index d68a893e9c..4501f88ab5 100644
--- a/python/pyarmnn/src/pyarmnn/_version.py
+++ b/python/pyarmnn/src/pyarmnn/_version.py
@@ -3,7 +3,7 @@
# SPDX-License-Identifier: MIT
import os
-version_info = (31, 0, 0)
+version_info = (32, 0, 0)
__dev_version_env = os.getenv("PYARMNN_DEV_VER", "")
@@ -24,7 +24,7 @@ def check_armnn_version(installed_armnn_version: str, expected_armnn_version: st
"""Compares expected Arm NN version and Arm NN version used to build the package.
Args:
- installed_armnn_version (str): Arm NN version used to generate the package (e.g. 31.0.0)
+ installed_armnn_version (str): Arm NN version used to generate the package (e.g. 32.0.0)
expected_armnn_version (str): Expected Arm NN version
Returns:
diff --git a/python/pyarmnn/test/test_setup.py b/python/pyarmnn/test/test_setup.py
index ada96ccad4..8275a537b4 100644
--- a/python/pyarmnn/test/test_setup.py
+++ b/python/pyarmnn/test/test_setup.py
@@ -87,15 +87,15 @@ def test_gcc_serch_path():
def test_armnn_version():
- check_armnn_version('31.0.0', '31.0.0')
+ check_armnn_version('32.0.0', '32.0.0')
def test_incorrect_armnn_version():
with pytest.raises(AssertionError) as err:
- check_armnn_version('31.0.0', '31.1.0')
+ check_armnn_version('32.0.0', '32.1.0')
- assert 'Expected ArmNN version is 31.1.0 but installed ArmNN version is 31.0.0' in str(err.value)
+ assert 'Expected ArmNN version is 32.1.0 but installed ArmNN version is 32.0.0' in str(err.value)
def test_armnn_version_patch_does_not_matter():
- check_armnn_version('31.0.0', '31.0.1')
+ check_armnn_version('32.0.0', '32.0.1')
diff --git a/python/pyarmnn/test/test_version.py b/python/pyarmnn/test/test_version.py
index f68adff0c7..145fc3bc04 100644
--- a/python/pyarmnn/test/test_version.py
+++ b/python/pyarmnn/test/test_version.py
@@ -18,7 +18,7 @@ def test_dev_version():
importlib.reload(v)
- assert "31.0.0.dev1" == v.__version__
+ assert "32.0.0.dev1" == v.__version__
del os.environ["PYARMNN_DEV_VER"]
del v
@@ -30,7 +30,7 @@ def test_arm_version_not_affected():
importlib.reload(v)
- assert "31.0.0" == v.__arm_ml_version__
+ assert "32.0.0" == v.__arm_ml_version__
del os.environ["PYARMNN_DEV_VER"]
del v
diff --git a/shim/sl/canonical/ConversionUtils.cpp b/shim/sl/canonical/ConversionUtils.cpp
index f48af32e21..b64854856e 100644
--- a/shim/sl/canonical/ConversionUtils.cpp
+++ b/shim/sl/canonical/ConversionUtils.cpp
@@ -256,6 +256,7 @@ LayerInputHandle ConvertToLayerInputHandle(const Operation& operation,
IsInputSupported,
data.m_Backends,
isInputSupported,
+ armnn::BackendId(),
operandTensorInfo);
if (!isInputSupported)
@@ -292,10 +293,12 @@ LayerInputHandle ConvertToLayerInputHandle(const Operation& operation,
if (tensorPin.IsValid())
{
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsConstantSupported,
data.m_Backends,
isSupported,
+ setBackend,
tensorPin.GetConstTensor().GetInfo());
if (!isSupported)
{
@@ -304,6 +307,7 @@ LayerInputHandle ConvertToLayerInputHandle(const Operation& operation,
armnn::IConnectableLayer* constantLayer =
data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
+ constantLayer->SetBackendId(setBackend);
armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
outputSlot.SetTensorInfo(constantTensorInfo);
@@ -455,13 +459,14 @@ bool ConvertPooling2d(const Operation& operation,
}
bool isSupported = false;
-
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsPooling2dSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
desc);
@@ -483,6 +488,7 @@ bool ConvertPooling2d(const Operation& operation,
}
armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
+ pooling2dLayer->SetBackendId(setBackend);
if (!pooling2dLayer)
{
return Fail("%s: AddPooling2dLayer failed", __func__);
@@ -547,12 +553,14 @@ bool ConvertReduce(const Operation& operation,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsReduceSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
descriptor);
@@ -573,6 +581,7 @@ bool ConvertReduce(const Operation& operation,
}
armnn::IConnectableLayer* const layer = data.m_Network->AddReduceLayer(descriptor);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -601,13 +610,14 @@ bool ConvertToActivation(const Operation& operation,
const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
bool isSupported = false;
-
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsActivationSupported,
data.m_Backends,
isSupported,
+ setBackend,
input.GetTensorInfo(),
outInfo,
activationDesc);
@@ -628,6 +638,7 @@ bool ConvertToActivation(const Operation& operation,
}
armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -925,10 +936,12 @@ armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsActivationSupported,
data.m_Backends,
isSupported,
+ setBackend,
prevLayer->GetOutputSlot(0).GetTensorInfo(),
tensorInfo,
activationDesc);
@@ -938,6 +951,7 @@ armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
}
activationLayer = data.m_Network->AddActivationLayer(activationDesc);
+ activationLayer->SetBackendId(setBackend);
prevLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
activationLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
diff --git a/shim/sl/canonical/ConversionUtils.hpp b/shim/sl/canonical/ConversionUtils.hpp
index beee00d11a..91a8e3080c 100644
--- a/shim/sl/canonical/ConversionUtils.hpp
+++ b/shim/sl/canonical/ConversionUtils.hpp
@@ -150,7 +150,7 @@ static bool Fail(const char* formatStr, Args&&... args)
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
-#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
+#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, setBackend, ...) \
try \
{ \
for (auto&& backendId : backends) \
@@ -163,6 +163,7 @@ try \
layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
if (supported) \
{ \
+ setBackend = backendId; \
break; \
} \
else \
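With the extra `setBackend` argument, every caller follows the same pattern: declare an `armnn::BackendId`, let the macro record the first backend that reports the layer as supported, then stamp that id onto the layer it creates so Optimize() can skip a second IsLayerSupported() pass. A self-contained sketch of that flow, with mock types standing in for the driver's macro and for armnn::IConnectableLayer:

```cpp
// Simplified stand-in for the FORWARD_LAYER_SUPPORT_FUNC pattern; not the driver's code.
#include <iostream>
#include <string>
#include <vector>

struct MockLayer
{
    std::string m_BackendId{"Unknown"};
    void SetBackendId(const std::string& id) { m_BackendId = id; }
};

// Stand-in for an Is*Supported() call on a given backend.
bool IsLayerSupportedOn(const std::string& backendId)
{
    return backendId == "CpuAcc"; // pretend only CpuAcc supports this layer
}

int main()
{
    std::vector<std::string> backends{"GpuAcc", "CpuAcc", "CpuRef"};

    bool isSupported = false;
    std::string setBackend; // mirrors the new armnn::BackendId setBackend variable

    // Mirrors what the macro now does: remember the first backend that accepts the layer.
    for (const auto& backendId : backends)
    {
        if (IsLayerSupportedOn(backendId))
        {
            isSupported = true;
            setBackend = backendId;
            break;
        }
    }

    if (isSupported)
    {
        MockLayer layer;
        layer.SetBackendId(setBackend); // Optimize() can now skip re-checking this layer
        std::cout << "Layer assigned to " << layer.m_BackendId << "\n";
    }
    return 0;
}
```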
@@ -322,10 +323,12 @@ bool BroadcastTensor(LayerInputHandle& input0,
armnn::ReshapeDescriptor reshapeDescriptor;
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsReshapeSupported,
data.m_Backends,
isSupported,
+ setBackend,
smallInfo,
reshapedInfo,
reshapeDescriptor);
@@ -336,6 +339,7 @@ bool BroadcastTensor(LayerInputHandle& input0,
ARMNN_ASSERT(data.m_Network != nullptr);
armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
+ reshapeLayer.SetBackendId(setBackend);
if (input0IsSmaller)
{
@@ -527,7 +531,8 @@ inline bool RequiresReshape(armnn::TensorShape & inputShape)
inline void SwizzleInputs(armnn::INetwork& network,
std::vector<LayerInputHandle>& inputs,
std::vector<armnn::TensorShape>& inputShapes,
- const armnn::PermutationVector& mapping)
+ const armnn::PermutationVector& mapping,
+ std::vector<armnn::BackendId>& setBackends)
{
if (!mapping.IsEqual(IdentityPermutation4D))
{
@@ -536,6 +541,7 @@ inline void SwizzleInputs(armnn::INetwork& network,
{
// add swizzle layer
armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
+ swizzleLayer.SetBackendId(setBackends[i]);
auto& outputSlot = swizzleLayer.GetOutputSlot(0);
auto& outputInfo = outputSlot.GetTensorInfo();
// replace inputs with the swizzled ones
@@ -553,6 +559,7 @@ bool TransposeInputTensors(ConversionData& data,
// If we have a IdentityPermutation4D or IdentityPermutation3D then we are not permuting
if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
{
+ std::vector<armnn::BackendId> setBackendsVec;
armnn::TensorInfo outputTransposeInfo;
size_t nInputs = inputs.size();
for (size_t i=0; i<nInputs; ++i)
@@ -563,20 +570,23 @@ bool TransposeInputTensors(ConversionData& data,
outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsTransposeSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputs[i].GetTensorInfo(),
outputTransposeInfo,
transposeDesc);
+ setBackendsVec.push_back(setBackend);
if (!isSupported)
{
return false;
}
}
- SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
+ SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping, setBackendsVec);
}
return true;
}
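TransposeInputTensors() now collects one backend id per input while it validates each transpose, and SwizzleInputs() consumes that vector index-for-index when it creates the corresponding transpose layers. A small sketch of the parallel-vector idea (illustrative names only, not the driver's API):

```cpp
#include <cassert>
#include <string>
#include <vector>

struct SwizzleLayer
{
    std::string backendId; // backend that validated this input's transpose
};

// Stand-in for SwizzleInputs(): the i-th layer is tagged with the i-th backend
// collected during validation.
std::vector<SwizzleLayer> SwizzleAll(const std::vector<std::string>& inputs,
                                     const std::vector<std::string>& setBackendsVec)
{
    assert(inputs.size() == setBackendsVec.size());
    std::vector<SwizzleLayer> layers;
    for (size_t i = 0; i < inputs.size(); ++i)
    {
        layers.push_back(SwizzleLayer{setBackendsVec[i]});
    }
    return layers;
}

int main()
{
    auto layers = SwizzleAll({"input0", "input1"}, {"CpuAcc", "GpuAcc"});
    return layers.size() == 2 ? 0 : 1;
}
```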
diff --git a/shim/sl/canonical/Converter.cpp b/shim/sl/canonical/Converter.cpp
index 8885fafe53..be052a6faa 100644
--- a/shim/sl/canonical/Converter.cpp
+++ b/shim/sl/canonical/Converter.cpp
@@ -206,12 +206,14 @@ bool Converter::ConvertAdd(const Operation& operation, const Model& model, Conve
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsAdditionSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo0,
inputInfo1,
outputInfo);
@@ -232,6 +234,7 @@ bool Converter::ConvertAdd(const Operation& operation, const Model& model, Conve
}
armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
+ startLayer->SetBackendId(setBackend);
bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
if (!isReshapeSupported)
@@ -290,13 +293,14 @@ bool Converter::ConvertArgMinMax(const Operation& operation,
descriptor.m_Axis = axis;
bool isSupported = false;
-
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsArgMinMaxSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo0,
outputInfo,
descriptor);
@@ -317,6 +321,7 @@ bool Converter::ConvertArgMinMax(const Operation& operation,
}
armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input0.Connect(layer->GetInputSlot(0));
@@ -391,12 +396,14 @@ bool Converter::ConvertBatchMatMul(const Operation& operation, const Model& mode
batchMatMulDesc.m_TransposeY = GetOptionalBool(operation, 3, model, data);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsBatchMatMulSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo0,
inputInfo1,
outputInfo,
@@ -419,6 +426,7 @@ bool Converter::ConvertBatchMatMul(const Operation& operation, const Model& mode
}
armnn::IConnectableLayer* const layer = data.m_Network->AddBatchMatMulLayer(batchMatMulDesc);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input0.Connect(layer->GetInputSlot(0));
input1.Connect(layer->GetInputSlot(1));
@@ -482,12 +490,14 @@ bool Converter::ConvertBatchToSpaceNd(const Operation& operation, const Model& m
batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsBatchToSpaceNdSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
batchToSpaceNdDesc);
@@ -509,6 +519,7 @@ bool Converter::ConvertBatchToSpaceNd(const Operation& operation, const Model& m
}
armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -536,13 +547,14 @@ bool Converter::ConvertCast(const Operation& operation, const Model& model, Conv
const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
bool isSupported = false;
-
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsCastSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo);
};
@@ -562,6 +574,7 @@ bool Converter::ConvertCast(const Operation& operation, const Model& model, Conv
}
IConnectableLayer* layer = data.m_Network->AddCastLayer();
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -597,12 +610,14 @@ bool Converter::ConvertComparison(const Operation& operation,
ComparisonDescriptor descriptor(comparisonOperation);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsComparisonSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo0,
inputInfo1,
outputInfo,
@@ -624,6 +639,7 @@ bool Converter::ConvertComparison(const Operation& operation,
}
IConnectableLayer* layer = data.m_Network->AddComparisonLayer(descriptor);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
@@ -735,10 +751,12 @@ bool Converter::ConvertConcatenation(const Operation& operation, const Model& mo
reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
bool isSupported = false;
+ armnn::BackendId setBackendReshape;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsReshapeSupported,
data.m_Backends,
isSupported,
+ setBackendReshape,
operandInputHandle.GetTensorInfo(),
reshapeInfo,
reshapeDescriptor);
@@ -748,6 +766,7 @@ bool Converter::ConvertConcatenation(const Operation& operation, const Model& mo
return false;
}
armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
+ newReshape.SetBackendId(setBackendReshape);
// Point to the reshape operation rather then the input operation
operandShape = reshapeInfo.GetShape();
@@ -850,9 +869,16 @@ bool Converter::ConvertConcatenation(const Operation& operation, const Model& mo
[](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
bool isSupported = false;
+ armnn::BackendId setBackendConcat;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported){
- FORWARD_LAYER_SUPPORT_FUNC(__func__, IsConcatSupported, data.m_Backends, isSupported, inputTensorInfos,
- outputInfo, concatDescriptor);
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ IsConcatSupported,
+ data.m_Backends,
+ isSupported,
+ setBackendConcat,
+ inputTensorInfos,
+ outputInfo,
+ concatDescriptor);
};
if (!isDynamicTensor)
@@ -870,6 +896,7 @@ bool Converter::ConvertConcatenation(const Operation& operation, const Model& mo
}
armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
+ layer->SetBackendId(setBackendConcat);
assert(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// Connect inputs to the layer
@@ -889,10 +916,12 @@ bool Converter::ConvertConcatenation(const Operation& operation, const Model& mo
armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
permutationPair.second);
isSupported = false;
+ armnn::BackendId setBackendTranspose;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsTransposeSupported,
data.m_Backends,
isSupported,
+ setBackendTranspose,
inputTransposeInfo,
outputTransposeInfo,
transposeDesc);
@@ -903,6 +932,7 @@ bool Converter::ConvertConcatenation(const Operation& operation, const Model& mo
// Add permutation layer and connect the output to it, the permutation becomes the output layer
armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
permutationPair.second);
+ deswizzleLayer.SetBackendId(setBackendTranspose);
layer = &deswizzleLayer;
return true;
@@ -945,11 +975,13 @@ bool Converter::ConvertConcatenation(const Operation& operation, const Model& mo
armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();
isSupported = false;
+ armnn::BackendId setBackendReshape2;
auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported){
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsReshapeSupported,
data.m_Backends,
isSupported,
+ setBackendReshape2,
concatInfo,
afterConcatInfo,
reshapeDescriptor);
@@ -969,6 +1001,7 @@ bool Converter::ConvertConcatenation(const Operation& operation, const Model& mo
return false;
}
layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
+ layer->SetBackendId(setBackendReshape2);
return SetupAndTrackLayerOutputSlot(operation,
0,
*layer,
@@ -1109,11 +1142,13 @@ bool Converter::ConvertConv2d(const Operation& operation, const Model& model, Co
VLOG(DRIVER) << "Converter::ConvertConv2d(): Weights and Biases are as INPUTS.";
}
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) {
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsConvolution2dSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
desc,
@@ -1141,6 +1176,7 @@ bool Converter::ConvertConv2d(const Operation& operation, const Model& model, Co
}
armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);
+ startLayer->SetBackendId(setBackend);
if (!startLayer)
{
@@ -1194,12 +1230,14 @@ bool Converter::ConvertDepthToSpace(const Operation& operation, const Model& mod
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsDepthToSpaceSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
descriptor);
@@ -1220,6 +1258,7 @@ bool Converter::ConvertDepthToSpace(const Operation& operation, const Model& mod
}
armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -1355,11 +1394,13 @@ bool Converter::ConvertDepthwiseConv2d(const Operation& operation, const Model&
VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d(): Weights and Biases are as INPUTS.";
}
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) {
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsDepthwiseConvolutionSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
desc,
@@ -1387,6 +1428,7 @@ bool Converter::ConvertDepthwiseConv2d(const Operation& operation, const Model&
}
armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc);
+ startLayer->SetBackendId(setBackend);
if (!startLayer)
{
@@ -1428,12 +1470,14 @@ bool Converter::ConvertDequantize(const Operation& operation, const Model& model
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsDequantizeSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo);
};
@@ -1453,6 +1497,7 @@ bool Converter::ConvertDequantize(const Operation& operation, const Model& model
}
armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -1488,12 +1533,14 @@ bool Converter::ConvertDiv(const Operation& operation, const Model& model, Conve
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsDivisionSupported,
data.m_Backends,
isSupported,
+ setBackend,
input0.GetTensorInfo(),
input1.GetTensorInfo(),
outputInfo);
@@ -1514,6 +1561,7 @@ bool Converter::ConvertDiv(const Operation& operation, const Model& model, Conve
}
armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
+ startLayer->SetBackendId(setBackend);
bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
if (!isReshapeSupported)
@@ -1552,13 +1600,14 @@ bool Converter::ConvertElementwiseUnary(const Operation& operation,
ElementwiseUnaryDescriptor descriptor(unaryOperation);
bool isSupported = false;
-
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsElementwiseUnarySupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
descriptor);
@@ -1579,6 +1628,7 @@ bool Converter::ConvertElementwiseUnary(const Operation& operation,
}
IConnectableLayer* layer = data.m_Network->AddElementwiseUnaryLayer(descriptor);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -1672,12 +1722,14 @@ bool Converter::ConvertExpandDims(const Operation& operation, const Model& model
reshapeDescriptor.m_TargetShape = targetShape;
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsReshapeSupported,
data.m_Backends,
isSupported,
+ setBackend,
input.GetTensorInfo(),
outputInfo,
reshapeDescriptor);
@@ -1702,6 +1754,7 @@ bool Converter::ConvertExpandDims(const Operation& operation, const Model& model
}
IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -1769,10 +1822,12 @@ bool Converter::ConvertFill(const Operation& operation, const Model& model, Conv
}
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsFillSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
descriptor);
@@ -1782,6 +1837,7 @@ bool Converter::ConvertFill(const Operation& operation, const Model& model, Conv
}
IConnectableLayer* const layer = data.m_Network->AddFillLayer(descriptor);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -1806,12 +1862,14 @@ bool Converter::ConvertFloor(const Operation& operation, const Model& model, Con
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsFloorSupported,
data.m_Backends,
isSupported,
+ setBackend,
input.GetTensorInfo(),
outputInfo);
};
@@ -1831,6 +1889,7 @@ bool Converter::ConvertFloor(const Operation& operation, const Model& model, Con
}
armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -1912,6 +1971,7 @@ bool Converter::ConvertFullyConnected(const Operation& operation, const Model& m
desc.m_ConstantWeights = IsOperandConstant(*weightsOperand);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
@@ -1928,6 +1988,7 @@ bool Converter::ConvertFullyConnected(const Operation& operation, const Model& m
IsFullyConnectedSupported,
data.m_Backends,
isSupported,
+ setBackend,
reshapedInfo,
outputInfo,
weightsInfo,
@@ -1951,6 +2012,7 @@ bool Converter::ConvertFullyConnected(const Operation& operation, const Model& m
// Add FullyConnected layer. Weights and bias will be connected as constant layers or non const inputs.
armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc);
+ startLayer->SetBackendId(setBackend);
if (inputInfo.GetNumDimensions() > 2U)
{
@@ -2022,12 +2084,14 @@ bool Converter::ConvertGather(const Operation& operation, const Model& model, Co
desc.m_Axis = axis;
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsGatherSupported,
data.m_Backends,
isSupported,
+ setBackend,
input.GetTensorInfo(),
indices.GetTensorInfo(),
outputInfo,
@@ -2049,6 +2113,7 @@ bool Converter::ConvertGather(const Operation& operation, const Model& model, Co
}
IConnectableLayer* layer = data.m_Network->AddGatherLayer(desc);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
indices.Connect(layer->GetInputSlot(1));
@@ -2209,10 +2274,12 @@ bool Converter::ConvertGroupedConv2d(const Operation& operation, const Model& mo
}
bool isSupported = false;
+ armnn::BackendId setBackendSplit;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsSplitterSupported,
data.m_Backends,
isSupported,
+ setBackendSplit,
inputInfo,
splitterOutputInfos,
splitterDesc);
@@ -2222,6 +2289,7 @@ bool Converter::ConvertGroupedConv2d(const Operation& operation, const Model& mo
}
IConnectableLayer* splitterLayer = data.m_Network->AddSplitterLayer(splitterDesc);
+ splitterLayer->SetBackendId(setBackendSplit);
if (!splitterLayer)
{
return Fail("%s: Failed to add SplitterLayer", __func__);
@@ -2305,12 +2373,14 @@ bool Converter::ConvertGroupedConv2d(const Operation& operation, const Model& mo
biasesDataOffset));
isSupported = false;
+ armnn::BackendId setBackendConv;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsConvolution2dSupported,
data.m_Backends,
isSupported,
+ setBackendConv,
groupInputInfo,
outputInfo,
desc,
@@ -2336,6 +2406,8 @@ bool Converter::ConvertGroupedConv2d(const Operation& operation, const Model& mo
IConnectableLayer* biasLayer = data.m_Network->AddConstantLayer(groupBiases);
IConnectableLayer* convLayer = data.m_Network->AddConvolution2dLayer(desc);
+ convLayer->SetBackendId(setBackendConv);
+
if (!convLayer)
{
return Fail("%s: AddConvolution2dLayer failed", __func__);
@@ -2384,10 +2456,12 @@ bool Converter::ConvertGroupedConv2d(const Operation& operation, const Model& mo
}
isSupported = false;
+ armnn::BackendId setBackendConcat;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsConcatSupported,
data.m_Backends,
isSupported,
+ setBackendConcat,
std::vector<const TensorInfo*>(numGroups * channelMultiplier, &groupOutputInfo),
outputInfo,
concatDescriptor);
@@ -2398,6 +2472,7 @@ bool Converter::ConvertGroupedConv2d(const Operation& operation, const Model& mo
}
IConnectableLayer* concatLayer = data.m_Network->AddConcatLayer(concatDescriptor);
+ concatLayer->SetBackendId(setBackendConcat);
if (!concatLayer)
{
return Fail("%s: AddConcatLayer failed", __func__);
@@ -2488,12 +2563,14 @@ bool Converter::ConvertInstanceNormalization(const Operation& operation, const M
desc.m_DataLayout = OptionalDataLayout(operation, 4, model, data);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsInstanceNormalizationSupported,
data.m_Backends,
isSupported,
+ setBackend,
input.GetTensorInfo(),
outputInfo,
desc);
@@ -2514,6 +2591,7 @@ bool Converter::ConvertInstanceNormalization(const Operation& operation, const M
}
IConnectableLayer* layer = data.m_Network->AddInstanceNormalizationLayer(desc);
+ layer->SetBackendId(setBackend);
input.Connect(layer->GetInputSlot(0));
return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -2552,12 +2630,14 @@ bool Converter::ConvertL2Normalization(const Operation& operation, const Model&
desc.m_DataLayout = armnn::DataLayout::NHWC;
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsL2NormalizationSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
desc);
@@ -2578,6 +2658,7 @@ bool Converter::ConvertL2Normalization(const Operation& operation, const Model&
}
armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -2640,12 +2721,14 @@ bool Converter::ConvertLocalResponseNormalization(const Operation& operation,
descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsNormalizationSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
descriptor);
@@ -2667,6 +2750,7 @@ bool Converter::ConvertLocalResponseNormalization(const Operation& operation,
armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -2703,13 +2787,14 @@ bool Converter::ConvertLogicalBinary(const Operation& operation,
LogicalBinaryDescriptor descriptor(logicalOperation);
bool isSupported = false;
-
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsLogicalBinarySupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo0,
inputInfo1,
outputInfo,
@@ -2731,6 +2816,7 @@ bool Converter::ConvertLogicalBinary(const Operation& operation,
}
IConnectableLayer* layer = data.m_Network->AddLogicalBinaryLayer(descriptor);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
@@ -2808,12 +2894,14 @@ bool Converter::ConvertLogSoftmax(const Operation& operation, const Model& model
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsLogSoftmaxSupported,
data.m_Backends,
isSupported,
+ setBackend,
input.GetTensorInfo(),
outputInfo,
descriptor);
@@ -2834,6 +2922,7 @@ bool Converter::ConvertLogSoftmax(const Operation& operation, const Model& model
}
IConnectableLayer* layer = data.m_Network->AddLogSoftmaxLayer(descriptor);
+ layer->SetBackendId(setBackend);
if (!layer)
{
return Fail("%s: AddLogSoftmaxLayer() returned nullptr", __func__);
@@ -3193,12 +3282,14 @@ bool Converter::ConvertLstm(const Operation& operation, const Model& model, Conv
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsLstmSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputStateInInfo,
cellStateInInfo,
@@ -3231,6 +3322,7 @@ bool Converter::ConvertLstm(const Operation& operation, const Model& model, Conv
// Add the layer
IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
+ layer->SetBackendId(setBackend);
input.Connect(layer->GetInputSlot(0));
outputStateIn.Connect(layer->GetInputSlot(1));
@@ -3283,12 +3375,14 @@ bool Converter::ConvertMaximum(const Operation& operation, const Model& model, C
const TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsMaximumSupported,
data.m_Backends,
isSupported,
+ setBackend,
input0.GetTensorInfo(),
input1.GetTensorInfo(),
outInfo);
@@ -3309,6 +3403,7 @@ bool Converter::ConvertMaximum(const Operation& operation, const Model& model, C
}
IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
if (!isReshapeSupported)
@@ -3370,12 +3465,14 @@ bool Converter::ConvertMean(const Operation& operation, const Model& model, Conv
descriptor.m_KeepDims = keepDims > 0;
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsMeanSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
descriptor);
@@ -3396,6 +3493,7 @@ bool Converter::ConvertMean(const Operation& operation, const Model& model, Conv
}
armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -3423,12 +3521,14 @@ bool Converter::ConvertMinimum(const Operation& operation, const Model& model, C
const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsMinimumSupported,
data.m_Backends,
isSupported,
+ setBackend,
input0.GetTensorInfo(),
input1.GetTensorInfo(),
outputInfo);
@@ -3449,6 +3549,7 @@ bool Converter::ConvertMinimum(const Operation& operation, const Model& model, C
}
IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
if (!isReshapeSupported)
@@ -3489,12 +3590,14 @@ bool Converter::ConvertMul(const Operation& operation, const Model& model, Conve
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsMultiplicationSupported,
data.m_Backends,
isSupported,
+ setBackend,
input0.GetTensorInfo(),
input1.GetTensorInfo(),
outputInfo);
@@ -3515,6 +3618,7 @@ bool Converter::ConvertMul(const Operation& operation, const Model& model, Conve
}
armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
+ startLayer->SetBackendId(setBackend);
bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
if (!isReshapeSupported)
@@ -3564,12 +3668,14 @@ bool Converter::ConvertPad(const Operation& operation, const Model& model, Conve
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsPadSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
descriptor);
@@ -3590,6 +3696,7 @@ bool Converter::ConvertPad(const Operation& operation, const Model& model, Conve
}
armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -3666,12 +3773,14 @@ bool Converter::ConvertPadV2(const Operation& operation, const Model& model, Con
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsPadSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
descriptor);
@@ -3692,6 +3801,7 @@ bool Converter::ConvertPadV2(const Operation& operation, const Model& model, Con
}
IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -3722,12 +3832,14 @@ bool Converter::ConvertPrelu(const Operation& operation, const Model& model, Con
const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsPreluSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
alphaInfo,
outputInfo);
@@ -3748,6 +3860,7 @@ bool Converter::ConvertPrelu(const Operation& operation, const Model& model, Con
}
IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
+ layer->SetBackendId(setBackend);
if (!layer)
{
@@ -3782,12 +3895,14 @@ bool Converter::ConvertQuantize(const Operation& operation, const Model& model,
const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsQuantizeSupported,
data.m_Backends,
isSupported,
+ setBackend,
input.GetTensorInfo(),
outputInfo);
};
@@ -3807,6 +3922,7 @@ bool Converter::ConvertQuantize(const Operation& operation, const Model& model,
}
IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -4259,12 +4375,14 @@ bool Converter::ConvertQuantizedLstm(const Operation& operation, const Model& mo
// Check if the layer is supported
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& cellStateOutInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsQLstmSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputStatePrevTimeStepInfo,
cellStatePrevTimeStepInfo,
@@ -4295,6 +4413,7 @@ bool Converter::ConvertQuantizedLstm(const Operation& operation, const Model& mo
// Add the layer
IConnectableLayer* layer = data.m_Network->AddQLstmLayer(desc, params, "QLstm");
+ layer->SetBackendId(setBackend);
input.Connect(layer->GetInputSlot(0));
outputStatePrevTimeStep.Connect(layer->GetInputSlot(1));
@@ -4502,12 +4621,14 @@ bool Converter::ConvertQuantized16BitLstm(const Operation& operation, const Mode
paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsQuantizedLstmSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
previousCellStateInInfo,
previousOutputInInfo,
@@ -4534,6 +4655,7 @@ bool Converter::ConvertQuantized16BitLstm(const Operation& operation, const Mode
}
IConnectableLayer* const layer = data.m_Network->AddQuantizedLstmLayer(params, "QuantizedLstm");
+ layer->SetBackendId(setBackend);
input.Connect(layer->GetInputSlot(0));
previousCellStateIn.Connect(layer->GetInputSlot(1));
previousOutputIn.Connect(layer->GetInputSlot(2));
@@ -4580,10 +4702,12 @@ bool Converter::ConvertRank(const Operation& operation, const Model& model, Conv
}
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsRankSupported,
data.m_Backends,
isSupported,
+ setBackend,
input.GetTensorInfo(),
outInfo);
if (!isSupported)
@@ -4592,6 +4716,7 @@ bool Converter::ConvertRank(const Operation& operation, const Model& model, Conv
}
armnn::IConnectableLayer* layer = data.m_Network->AddRankLayer();
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -4620,13 +4745,14 @@ bool Converter::ConvertReLu(const Operation& operation, const Model& model, Conv
const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
bool isSupported = false;
-
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsActivationSupported,
data.m_Backends,
isSupported,
+ setBackend,
input.GetTensorInfo(),
outInfo,
desc);
@@ -4647,6 +4773,7 @@ bool Converter::ConvertReLu(const Operation& operation, const Model& model, Conv
}
armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(desc);
+ layer->SetBackendId(setBackend);
ARMNN_ASSERT(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -4724,12 +4851,14 @@ bool Converter::ConvertReshape(const Operation& operation, const Model& model, C
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsReshapeSupported,
data.m_Backends,
isSupported,
+ setBackend,
input.GetTensorInfo(),
outputInfo,
reshapeDescriptor);
@@ -4750,6 +4879,7 @@ bool Converter::ConvertReshape(const Operation& operation, const Model& model, C
}
armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -4868,12 +4998,14 @@ bool Converter::ConvertResize(const Operation& operation,
descriptor.m_HalfPixelCenters = GetOptionalBool(operation, 5, model, data);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsResizeSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
descriptor);
@@ -4894,6 +5026,7 @@ bool Converter::ConvertResize(const Operation& operation,
}
IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -4982,12 +5115,14 @@ bool Converter::ConvertSpaceToBatchNd(const Operation& operation, const Model& m
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo &outputInfo, bool &isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsSpaceToBatchNdSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
descriptor);
@@ -5007,6 +5142,7 @@ bool Converter::ConvertSpaceToBatchNd(const Operation& operation, const Model& m
}
armnn::IConnectableLayer *const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -5050,12 +5186,14 @@ bool Converter::ConvertSpaceToDepth(const Operation& operation, const Model& mod
desc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsSpaceToDepthSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
desc);
@@ -5076,6 +5214,7 @@ bool Converter::ConvertSpaceToDepth(const Operation& operation, const Model& mod
}
IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -5134,12 +5273,14 @@ bool Converter::ConvertSoftmax(const Operation& operation, const Model& model, C
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsSoftmaxSupported,
data.m_Backends,
isSupported,
+ setBackend,
input.GetTensorInfo(),
outputInfo,
desc);
@@ -5160,6 +5301,7 @@ bool Converter::ConvertSoftmax(const Operation& operation, const Model& model, C
}
IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -5195,12 +5337,14 @@ bool Converter::ConvertSub(const Operation& operation, const Model& model, Conve
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsSubtractionSupported,
data.m_Backends,
isSupported,
+ setBackend,
input0.GetTensorInfo(),
input1.GetTensorInfo(),
outputInfo);
@@ -5221,6 +5365,7 @@ bool Converter::ConvertSub(const Operation& operation, const Model& model, Conve
}
armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
+ startLayer->SetBackendId(setBackend);
bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
if (!isReshapeSupported)
@@ -5413,12 +5558,14 @@ bool Converter::ConvertTransposeConv2d(const Operation& operation, const Model&
Optional<TensorInfo> biases(bias.GetInfo());
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsTransposeConvolution2dSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
desc,
@@ -5441,6 +5588,7 @@ bool Converter::ConvertTransposeConv2d(const Operation& operation, const Model&
IConnectableLayer* startLayer =
data.m_Network->AddTransposeConvolution2dLayer(desc, weights, Optional<ConstTensor>(bias));
+ startLayer->SetBackendId(setBackend);
if (!startLayer)
{
return Fail("%s: AddTransposeConvolution2dLayer failed", __func__);
@@ -5526,10 +5674,12 @@ bool Converter::ConvertSqueeze(const Operation& operation, const Model& model, C
reshapeDesc.m_TargetShape = outputInfo.GetShape();
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsReshapeSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
reshapeDesc);
@@ -5540,6 +5690,7 @@ bool Converter::ConvertSqueeze(const Operation& operation, const Model& model, C
}
armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -5623,12 +5774,14 @@ bool Converter::ConvertStridedSlice(const Operation& operation, const Model& mod
}
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsStridedSliceSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
descriptor);
@@ -5672,6 +5825,7 @@ bool Converter::ConvertStridedSlice(const Operation& operation, const Model& mod
}
armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
@@ -5726,12 +5880,14 @@ bool Converter::ConvertTranspose(const Operation& operation, const Model& model,
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
bool isSupported = false;
+ armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
{
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsTransposeSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputInfo,
outputInfo,
transposeDesc);
@@ -5752,6 +5908,7 @@ bool Converter::ConvertTranspose(const Operation& operation, const Model& model,
}
armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
+ layer->SetBackendId(setBackend);
assert(layer != nullptr);
input.Connect(layer->GetInputSlot(0));
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index b144c78889..aab5227b75 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -275,7 +275,7 @@ public:
DataType GetDataType() const;
const BackendId& GetBackendId() const { return m_BackendId; }
- void SetBackendId(const BackendId& id) { m_BackendId = id; }
+ void SetBackendId(const BackendId& id) override { m_BackendId = id; }
// Virtuals
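The `override` keyword only compiles because a matching virtual now exists further up the hierarchy; given the INetwork.hpp change in this patch, the public interface presumably gained a pure-virtual SetBackendId(). A hedged sketch of that shape (the class name here is illustrative; the real declaration lives in include/armnn/INetwork.hpp):

```cpp
// Sketch only: a stand-in for the pure-virtual hook that Layer::SetBackendId() overrides.
#include <armnn/BackendId.hpp>

class IConnectableLayerSketch
{
public:
    virtual ~IConnectableLayerSketch() = default;
    // Presumed addition: lets callers (delegate, driver, shim) tag a layer with the
    // backend that accepted it during IsLayerSupported().
    virtual void SetBackendId(const armnn::BackendId& id) = 0;
};
```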
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 6d3058c670..a61624fb0a 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -12,6 +12,7 @@
#include "BackendSettings.hpp"
#include "optimizations/All.hpp"
#include "armnnUtils/Filesystem.hpp"
+#include "armnn/utility/Timer.hpp"
#include <armnn/backends/TensorHandle.hpp>
#include <armnn/backends/WorkloadFactory.hpp>
@@ -766,6 +767,15 @@ OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
}
}
+inline std::vector<DataType> GetLayerInOutDatatype(const Layer* layer)
+{
+ DataType dataTypeIn = layer->GetNumInputSlots() == 0 ? DataType::Float32 :
+ layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
+ DataType dataTypeOut = layer->GetNumOutputSlots() == 0 ? DataType::Float32 :
+ layer->GetOutputSlot(0).GetTensorInfo().GetDataType();
+ return {dataTypeIn, dataTypeOut};
+}
+
// Refactor to allow passing the IConnectableLayer* rather than Layer Iterator
// on Graph and SubgraphView which are different types.
void AssignBackendsIConnectable(OptimizedNetworkImpl* optNetObjPtr,
@@ -787,10 +797,7 @@ void AssignBackendsIConnectable(OptimizedNetworkImpl* optNetObjPtr,
return;
}
- DataType dataTypeIn = layer->GetNumInputSlots() == 0 ? DataType::Float32 :
- layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
- DataType dataTypeOut = layer->GetNumOutputSlots() == 0 ? DataType::Float32 :
- layer->GetOutputSlot(0).GetTensorInfo().GetDataType();
+ std::vector<DataType> inOutDataType = GetLayerInOutDatatype(layer);
std::string reasonIfUnsupported;
bool found = false;
@@ -808,8 +815,8 @@ void AssignBackendsIConnectable(OptimizedNetworkImpl* optNetObjPtr,
optNetObjPtr->GetGraph(),
layer,
layer->GetBackendHint().value(),
- dataTypeIn,
- dataTypeOut,
+ inOutDataType[0],
+ inOutDataType[1],
availablePreferredBackends,
reasonIfUnsupported,
errMessages).IsOk())
@@ -832,8 +839,8 @@ void AssignBackendsIConnectable(OptimizedNetworkImpl* optNetObjPtr,
optNetObjPtr->GetGraph(),
layer,
backend,
- dataTypeIn,
- dataTypeOut,
+ inOutDataType[0],
+ inOutDataType[1],
availablePreferredBackends,
reasonIfUnsupported,
errMessages);
@@ -903,12 +910,33 @@ OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
for (auto it = firstLayer; it != lastLayer; ++it)
{
- AssignBackendsIConnectable(optNetObjPtr,
- *it,
- errMessages,
- result,
- backendSettings,
- availablePreferredBackends);
+ auto layer = PolymorphicDowncast<Layer*>(*it);
+ std::vector<DataType> inOutDataType = GetLayerInOutDatatype(layer);
+
+ // In AttemptBackendAssignment() we check:
+ // - if input/output datatypes of the layer are float16
+ // - if the layer is supported with these datatypes
+ // If the layer is not supported (failing on ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED() in clframework),
+ // we attempt to insert conversion layers on either side of the new fp32 layer.
+ bool isFloat16 = false;
+ for (auto type : inOutDataType)
+ {
+ if (type == DataType::Float16)
+ {
+ isFloat16 = true;
+ break;
+ }
+ }
+
+ if (layer->GetBackendId() == "Unknown" || isFloat16)
+ {
+ AssignBackendsIConnectable(optNetObjPtr,
+ *it,
+ errMessages,
+ result,
+ backendSettings,
+ availablePreferredBackends);
+ }
}
for (auto it = firstLayer; it != lastLayer; ++it)
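The new guard means a layer is only pushed back through AssignBackendsIConnectable() when its backend is still "Unknown" (i.e. it was produced by an optimization after the first IsLayerSupported() pass) or when a float16 input/output means the FP16-to-FP32 fallback path must be re-evaluated. The float16 scan above is equivalent to a std::any_of over the two data types; the following is only a sketch of the same check, not the patch's code:

```cpp
#include <algorithm>
#include <vector>
#include <armnn/Types.hpp> // armnn::DataType

// Equivalent formulation of the float16 test in the hunk above.
bool ContainsFloat16(const std::vector<armnn::DataType>& inOutDataType)
{
    return std::any_of(inOutDataType.begin(), inOutDataType.end(),
                       [](armnn::DataType type) { return type == armnn::DataType::Float16; });
}
```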
@@ -1540,6 +1568,8 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
const OptimizerOptions& options,
Optional<std::vector<std::string>&> messages)
{
+ const auto start_time = armnn::GetTimeNow();
+
ARMNN_LOG(debug) << options.ToString();
// Enable profiling
@@ -1723,6 +1753,9 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
optGraph.AddCompatibilityLayers(backends, tensorHandleFactoryRegistry);
}
+ ARMNN_LOG(info) << "!! New time !! : " << std::setprecision(2)
+ << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms.";
+
return optNet;
}
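The timing added to Optimize() uses the helpers from armnn/utility/Timer.hpp that the patch includes at the top of Network.cpp. The same pattern in isolation, as a sketch:

```cpp
#include <armnn/utility/Timer.hpp> // armnn::GetTimeNow, armnn::GetTimeDuration
#include <iomanip>
#include <iostream>

int main()
{
    const auto start_time = armnn::GetTimeNow();

    // ... the work being measured would run here ...

    std::cout << "Elapsed: " << std::setprecision(2) << std::fixed
              << armnn::GetTimeDuration(start_time).count() << " ms." << std::endl;
    return 0;
}
```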