author    Cathal Corbett <cathal.corbett@arm.com>  2022-09-01 11:34:37 +0100
committer Cathal Corbett <cathal.corbett@arm.com>  2022-12-12 12:38:15 +0000
commit    5383767a7a759c867235ab66bd71f88281e3bd06 (patch)
tree      5704c33171d39dda9e4428c953e2efdd62ead656 /shim/sl/canonical/ConversionUtils.hpp
parent    a98e79a709f7c29728e1fc79c21ba5265993b8b6 (diff)
Optimize the calling of IsLayerSupported().
* Done as part of 22.11/23.02 innovation days.
* IsLayerSupported() is called in model prepare (delegate, android-nn-driver and shim/support_library) and again in ArmNN once model optimization is performed.
* From the first call to IsLayerSupported() we already know that the layers are supported and which backend they are supported on.
* The solution is to set the BackendId of the IConnectableLayer when IsLayerSupported() is called the first time (see the sketch below).
* In the Optimize() function we then check whether the backend is set. If so, we do not call IsLayerSupported() again.
* In the case where a supported layer gets optimized, the BackendId of the new optimized layer is set to "Unknown", and IsLayerSupported() is called again on the newly optimized layer.
* Includes a bug fix for IVGCVSW-7213 (Android Mean FP16 CpuAcc tests). Also related to bug IVGCVSW-7211.

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: I7a7820d0cdb079ffb5a3a2e0c44e252f652df53b
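To make the caching pattern concrete, here is a minimal C++ sketch, not the actual ArmNN code: IsSupportedOn() is a hypothetical stand-in for a real Is*Supported() query, while IConnectableLayer::SetBackendId() is the API exercised in the diff below.

#include <vector>
#include <armnn/BackendId.hpp>
#include <armnn/INetwork.hpp>

// Hypothetical stand-in for a concrete Is*Supported() query.
bool IsSupportedOn(const armnn::BackendId& backendId);

// First pass (model prepare): probe the candidate backends and cache
// the first one that supports the layer on the layer itself, so that
// Optimize() can skip the support check later.
armnn::BackendId TagLayerBackend(armnn::IConnectableLayer& layer,
                                 const std::vector<armnn::BackendId>& backends)
{
    for (const auto& backendId : backends)
    {
        if (IsSupportedOn(backendId))
        {
            layer.SetBackendId(backendId); // cached for Optimize()
            return backendId;
        }
    }
    return armnn::BackendId{}; // left unset: Optimize() must re-check
}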
Diffstat (limited to 'shim/sl/canonical/ConversionUtils.hpp')
-rw-r--r--  shim/sl/canonical/ConversionUtils.hpp | 16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/shim/sl/canonical/ConversionUtils.hpp b/shim/sl/canonical/ConversionUtils.hpp
index beee00d11a..91a8e3080c 100644
--- a/shim/sl/canonical/ConversionUtils.hpp
+++ b/shim/sl/canonical/ConversionUtils.hpp
@@ -150,7 +150,7 @@ static bool Fail(const char* formatStr, Args&&... args)
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
-#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
+#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, setBackend, ...) \
try \
{ \
for (auto&& backendId : backends) \
@@ -163,6 +163,7 @@ try \
layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
if (supported) \
{ \
+ setBackend = backendId; \
break; \
} \
else \
@@ -322,10 +323,12 @@ bool BroadcastTensor(LayerInputHandle& input0,
armnn::ReshapeDescriptor reshapeDescriptor;
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsReshapeSupported,
data.m_Backends,
isSupported,
+ setBackend,
smallInfo,
reshapedInfo,
reshapeDescriptor);
@@ -336,6 +339,7 @@ bool BroadcastTensor(LayerInputHandle& input0,
ARMNN_ASSERT(data.m_Network != nullptr);
armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
+ reshapeLayer.SetBackendId(setBackend);
if (input0IsSmaller)
{
@@ -527,7 +531,8 @@ inline bool RequiresReshape(armnn::TensorShape & inputShape)
inline void SwizzleInputs(armnn::INetwork& network,
std::vector<LayerInputHandle>& inputs,
std::vector<armnn::TensorShape>& inputShapes,
- const armnn::PermutationVector& mapping)
+ const armnn::PermutationVector& mapping,
+ std::vector<armnn::BackendId>& setBackends)
{
if (!mapping.IsEqual(IdentityPermutation4D))
{
@@ -536,6 +541,7 @@ inline void SwizzleInputs(armnn::INetwork& network,
{
// add swizzle layer
armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
+ swizzleLayer.SetBackendId(setBackends[i]);
auto& outputSlot = swizzleLayer.GetOutputSlot(0);
auto& outputInfo = outputSlot.GetTensorInfo();
// replace inputs with the swizzled ones
@@ -553,6 +559,7 @@ bool TransposeInputTensors(ConversionData& data,
// If we have a IdentityPermutation4D or IdentityPermutation3D then we are not permuting
if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
{
+ std::vector<armnn::BackendId> setBackendsVec;
armnn::TensorInfo outputTransposeInfo;
size_t nInputs = inputs.size();
for (size_t i=0; i<nInputs; ++i)
@@ -563,20 +570,23 @@ bool TransposeInputTensors(ConversionData& data,
outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);
bool isSupported = false;
+ armnn::BackendId setBackend;
FORWARD_LAYER_SUPPORT_FUNC(__func__,
IsTransposeSupported,
data.m_Backends,
isSupported,
+ setBackend,
inputs[i].GetTensorInfo(),
outputTransposeInfo,
transposeDesc);
+ setBackendsVec.push_back(setBackend);
if (!isSupported)
{
return false;
}
}
- SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
+ SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping, setBackendsVec);
}
return true;
}
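
For context, the Optimize()-side check described in the commit message is not part of this diff. A hedged sketch of what it amounts to, assuming the "Unknown" sentinel mentioned in the message together with BackendId's string constructor and equality operator:

#include <armnn/BackendId.hpp>

// Sketch only: decide whether Optimize() must call IsLayerSupported()
// for a layer. A layer created during optimization has its BackendId
// reset to "Unknown" and is re-validated; a layer already tagged during
// model prepare is skipped.
bool NeedsSupportCheck(const armnn::BackendId& assignedBackend)
{
    return assignedBackend == armnn::BackendId{}
        || assignedBackend == armnn::BackendId("Unknown");
}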