author    Cathal Corbett <cathal.corbett@arm.com>  2022-09-01 11:34:37 +0100
committer Cathal Corbett <cathal.corbett@arm.com>  2022-12-12 12:38:15 +0000
commit    5383767a7a759c867235ab66bd71f88281e3bd06 (patch)
tree      5704c33171d39dda9e4428c953e2efdd62ead656 /delegate/src/DelegateUtils.hpp
parent    a98e79a709f7c29728e1fc79c21ba5265993b8b6 (diff)
Optimize the calling of IsLayerSupported().
* Done as part of 22.11/23.02 innovation days.
* IsLayerSupported() is called in model prepare (delegate, android-nn-driver and shim/support_library) and again in ArmNN once model optimization is performed.
* From calling IsLayerSupported() the first time, we already know that the layers are supported and which backend they are supported on.
* The solution is to set the BackendId of the IConnectableLayer when IsLayerSupported() is called the first time.
* In the Optimize() function we then check whether the backend is already set. If so, we do not call IsLayerSupported() again.
* If a supported layer gets optimized, the BackendId of the newly optimized layer is reset to "Unknown", and IsLayerSupported() is called on that layer again.
* Includes a bug fix for IVGCVSW-7213 (Android Mean FP16 CpuAcc tests). Also related to bug IVGCVSW-7211.

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I7a7820d0cdb079ffb5a3a2e0c44e252f652df53b
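Condensed from the hunks below, the new call pattern in the delegate looks like this (an illustrative sketch assembled from the diff, not standalone code: delegateData, tfLiteContext and the tensor infos come from the surrounding delegate context):

    bool isSupported = false;
    armnn::BackendId setBackend;                // left unset until a backend reports support
    FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
                               tfLiteContext,
                               IsReshapeSupported,
                               delegateData.m_Backends,
                               isSupported,
                               setBackend,      // filled in by the macro with the first backend that supports the layer
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (isSupported)
    {
        armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
        reshapeLayer->SetBackendId(setBackend); // lets ArmNN's Optimize() skip a second IsLayerSupported() check
    }
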
Diffstat (limited to 'delegate/src/DelegateUtils.hpp')
-rw-r--r--  delegate/src/DelegateUtils.hpp  15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 58d8048be3..850b279fea 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -25,7 +25,7 @@ namespace
{
// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
-#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, ...) \
+#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
try \
{ \
for (auto&& backendId : backends) \
@@ -38,6 +38,7 @@ try \
layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
if (supported) \
{ \
+ setBackend = backendId; \
break; \
} \
else \
@@ -224,11 +225,13 @@ armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
armnn::ReshapeDescriptor reshapeDescriptor;
reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
tfLiteContext,
IsReshapeSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
smallInfo,
reshapedInfo,
reshapeDescriptor);
@@ -240,6 +243,7 @@ armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
ARMNN_ASSERT(delegateData.m_Network != nullptr);
// Add Reshape layer
armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
+ reshapeLayer->SetBackendId(setBackend);
ARMNN_ASSERT(reshapeLayer != nullptr);
reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
@@ -331,11 +335,13 @@ TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
}
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
tfLiteContext,
IsActivationSupported,
data.m_Backends,
isSupported,
+ setBackend,
prevLayer->GetOutputSlot(0).GetTensorInfo(),
activationOutputInfo,
activationDesc);
@@ -344,6 +350,7 @@ TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
return kTfLiteError;
}
armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
+ activationLayer->SetBackendId(setBackend);
ARMNN_ASSERT(activationLayer != nullptr);
activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);
@@ -566,11 +573,13 @@ TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
{
IgnoreUnused(layer);
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
tfLiteContext,
IsConstantSupported,
data.m_Backends,
isSupported,
+ setBackend,
constTensorInfo);
if (!isSupported)
{
@@ -581,6 +590,7 @@ TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
constTensorInfo,
armnn::Optional<armnn::PermutationVector&>());
armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
+ constantLayer->SetBackendId(setBackend);
armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(constTensorInfo);
@@ -615,11 +625,13 @@ TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
{
armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
tfLiteContext,
IsConstantSupported,
delegateData.m_Backends,
isSupported,
+ setBackend,
inputTensorInfo);
if (!isSupported)
{
@@ -629,6 +641,7 @@ TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
inputTensorInfo,
armnn::Optional<armnn::PermutationVector&>());
armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
+ constantLayer->SetBackendId(setBackend);
armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
outputSlot.SetTensorInfo(inputTensorInfo);