path: root/src/backends/cl/ClLayerSupport.cpp
author    Derek Lamberti <derek.lamberti@arm.com>    2019-04-15 18:37:35 +0100
committer derek.lamberti <derek.lamberti@arm.com>    2019-04-16 13:50:11 +0000
commit    0790dcea1056298d63f97dec904c8ade5d21f439 (patch)
tree      d75967e2eabe39ec08dc928fa77a7d4a51d85c5d /src/backends/cl/ClLayerSupport.cpp
parent    b98bbcfa2a809c4ad025883c059ae49c82b37cbd (diff)
download  armnn-0790dcea1056298d63f97dec904c8ade5d21f439.tar.gz
IVGCVSW-2957 MergerLayer subtensor optimization now backend agnostic
+ Update clframework pin
+ Cl and Neon Merger workloads updated to use MemoryLayout agnostic API
+ Workloads only use sub-tensor optimization if ALL input tensors are sub-tensors
+ Refactor LayerSupportCommon code to be a bit more succinct

Change-Id: Ib61ad4ccbd767e924dff07e61022e0cda4069828
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
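The third bullet is the key behavioural change: the Merger workloads now take the sub-tensor path only when every input handle is a sub-tensor. A minimal standalone sketch of that rule is below; the TensorHandleInfo struct and CanUseSubTensorOptimization helper are illustrative assumptions for this page, not code from the commit.

// Hypothetical sketch (not from this commit): the "ALL inputs are sub-tensors"
// rule from the commit message, expressed as a standalone check.
#include <algorithm>
#include <vector>

struct TensorHandleInfo
{
    bool isSubTensor; // assumed flag: the handle is a sub-tensor view of the concat output
};

// The sub-tensor concat optimization is only taken when every input qualifies.
bool CanUseSubTensorOptimization(const std::vector<TensorHandleInfo>& inputs)
{
    return !inputs.empty() &&
           std::all_of(inputs.begin(), inputs.end(),
                       [](const TensorHandleInfo& h) { return h.isSubTensor; });
}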
Diffstat (limited to 'src/backends/cl/ClLayerSupport.cpp')
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp  31
1 file changed, 25 insertions, 6 deletions
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index cfc0f11d25..a5c5f2bd9d 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -416,7 +416,14 @@ bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inpu
const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- if(descriptor.GetNumDimensions() - descriptor.GetConcatAxis() == 1)
+ if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
+ {
+ SetValueChecked(reasonIfUnsupported, "Cl Merger: Concat axis > Number of dimensions.");
+ return false;
+ }
+
+ unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
+ if (concatInnerAxis < 3) // Width, height, or channels
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClMergerWorkloadValidate,
reasonIfUnsupported,
@@ -424,12 +431,24 @@ bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inpu
output,
descriptor);
}
- else
+ else if (concatInnerAxis == 3)
+ {
+ // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
+ // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
+ for (auto& input : inputs)
+ {
+ if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
+ {
+ SetValueChecked(reasonIfUnsupported, "Cl Merger: Types and quantization parameters must match.");
+ return false;
+ }
+ }
+ return true; // Sub-tensors support concat along batch
+ }
+ else // > 4 dimensions not supported.
{
- return IsSupportedForDataTypeCl(reasonIfUnsupported,
- inputs[0]->GetDataType(),
- &TrueFunc<>,
- &TrueFunc<>);
+ SetValueChecked(reasonIfUnsupported, "Cl Merger: Maximum of 4 dimensions supported.");
+ return false;
}
}
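To make the new axis handling concrete: the patch counts the concat axis from the innermost dimension, so for a 4D tensor the batch axis (axis 0) maps to concatInnerAxis == 3 and is only supported through the sub-tensor path, while axes 1 to 3 map to concatInnerAxis < 3 and are validated by ClMergerWorkloadValidate. A small self-contained sketch of that mapping follows; ClassifyConcatAxis is an illustrative helper, not ArmNN code.

// Illustrative sketch of the axis mapping used in the new check; not ArmNN code.
#include <cstdio>

const char* ClassifyConcatAxis(unsigned int numDimensions, unsigned int concatAxis)
{
    if (numDimensions <= concatAxis)
    {
        return "unsupported: concat axis is outside the tensor's dimensions";
    }
    // Same computation as the patch: axis counted from the innermost dimension.
    unsigned int concatInnerAxis = (numDimensions - concatAxis) - 1;
    if (concatInnerAxis < 3)
    {
        return "width/height/channels: validated via ClMergerWorkloadValidate";
    }
    if (concatInnerAxis == 3)
    {
        return "batch: supported only through the sub-tensor optimization";
    }
    return "unsupported: more than 4 dimensions";
}

int main()
{
    std::printf("axis 0 of a 4D tensor -> %s\n", ClassifyConcatAxis(4u, 0u));
    std::printf("axis 3 of a 4D tensor -> %s\n", ClassifyConcatAxis(4u, 3u));
    return 0;
}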