path: root/src/backends/neon
author    Derek Lamberti <derek.lamberti@arm.com>  2019-04-15 18:37:35 +0100
committer derek.lamberti <derek.lamberti@arm.com>  2019-04-16 13:50:11 +0000
commit    0790dcea1056298d63f97dec904c8ade5d21f439 (patch)
tree      d75967e2eabe39ec08dc928fa77a7d4a51d85c5d /src/backends/neon
parent    b98bbcfa2a809c4ad025883c059ae49c82b37cbd (diff)
download  armnn-0790dcea1056298d63f97dec904c8ade5d21f439.tar.gz
IVGCVSW-2957 MergerLayer subtensor optimization now backend agnostic
+ Update clframework pin
+ Cl and Neon Merger workloads updated to use MemoryLayout agnostic API
+ Workloads only use sub-tensor optimization if ALL input tensors are sub-tensors
+ Refactor LayerSupportCommon code to be a bit more succinct

Change-Id: Ib61ad4ccbd767e924dff07e61022e0cda4069828
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
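The core of the change is mapping armnn's concat axis, which is counted from the outermost dimension, onto the Compute Library's axis numbering, which is counted from the innermost. A minimal self-contained sketch of that arithmetic (OriginsDescriptor here is a stand-in struct, not the real armnn type) mirrors the CalcAxis helper the patch introduces:

#include <cassert>
#include <cstddef>

// Stand-in for armnn::OriginsDescriptor, reduced to the two getters used.
struct OriginsDescriptor
{
    unsigned int m_NumDimensions;
    unsigned int m_ConcatAxis;
    unsigned int GetNumDimensions() const { return m_NumDimensions; }
    unsigned int GetConcatAxis() const { return m_ConcatAxis; }
};

// armnn counts the concat axis from the outermost dimension; the Compute
// Library counts from the innermost, so the axis has to be mirrored.
std::size_t CalcAxis(const OriginsDescriptor& desc)
{
    return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1;
}

int main()
{
    assert(CalcAxis({4, 3}) == 0); // 4D NCHW, concat along W (innermost)
    assert(CalcAxis({4, 0}) == 3); // 4D NCHW, concat along N (batch)
    return 0;
}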
Diffstat (limited to 'src/backends/neon')
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp             | 38
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp          |  6
-rw-r--r--  src/backends/neon/workloads/NeonMergerWorkload.cpp | 52
-rw-r--r--  src/backends/neon/workloads/NeonMergerWorkload.hpp |  7
4 files changed, 68 insertions, 35 deletions
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 46a7e6f79f..898660cc91 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -52,10 +52,7 @@ bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
#if defined(ARMCOMPUTENEON_ENABLED)
return true;
#else
- if (reasonIfUnsupported)
- {
- reasonIfUnsupported.value() = "The armnn library has been built without NEON support";
- }
+ SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
return false;
#endif
}
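The removed branch shows exactly what SetValueChecked abstracts. A hedged sketch of that helper, using a minimal stand-in for armnn's Optional<std::string&> rather than the real template:

#include <string>
#include <utility>

// Minimal stand-in for armnn::Optional<std::string&>: a possibly-null
// pointer with the same truthiness and value() surface.
struct OptionalStringRef
{
    std::string* ptr = nullptr;
    explicit operator bool() const { return ptr != nullptr; }
    std::string& value() const { return *ptr; }
};

// Assign a reason only when the caller supplied somewhere to put it,
// replacing the repeated if/value() pattern the hunk deletes.
void SetValueChecked(OptionalStringRef optional, std::string value)
{
    if (optional)
    {
        optional.value() = std::move(value);
    }
}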
@@ -304,7 +301,14 @@ bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> in
const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- if(descriptor.GetNumDimensions() - descriptor.GetConcatAxis() == 1)
+ if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
+ {
+ SetValueChecked(reasonIfUnsupported, "Neon Merger: Concat axis > Number of dimensions.");
+ return false;
+ }
+
+ unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
+ if(concatInnerAxis < 3) // Width, height, or channels
{
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMergerWorkloadValidate,
reasonIfUnsupported,
@@ -312,13 +316,23 @@ bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> in
output,
descriptor);
}
- else
- {
- return IsSupportedForDataTypeNeon(reasonIfUnsupported,
- inputs[0]->GetDataType(),
- &TrueFunc<>,
- &TrueFunc<>);
- }
+ else if (concatInnerAxis == 3)
+ {
+ for (auto& input : inputs)
+ {
+ if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
+ {
+ SetValueChecked(reasonIfUnsupported, "Neon Merger: Types and quantization parameters must match.");
+ return false;
+ }
+ }
+ return true; // Sub-tensors support concat along batch
+ }
+ else // > 4 dimensions not supported.
+ {
+ SetValueChecked(reasonIfUnsupported, "Neon Merger: Maximum of 4 dimensions supported.");
+ return false;
+ }
}
bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
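The hunk above is effectively a three-way classification on the inner concat axis. A simplified sketch of that decision (ClassifyMerger and MergerSupport are assumed names for illustration, not armnn API):

#include <string>

enum class MergerSupport { AclConcat, SubTensorsOnly, Unsupported };

// Mirrors the branch structure of IsMergerSupported: axes up to channels go
// through NEConcatenateLayer; the batch axis relies on the sub-tensor
// optimization; anything deeper than 4 dimensions is rejected.
MergerSupport ClassifyMerger(unsigned int numDimensions, unsigned int concatAxis,
                             std::string& reasonIfUnsupported)
{
    if (numDimensions <= concatAxis)
    {
        reasonIfUnsupported = "Concat axis > Number of dimensions.";
        return MergerSupport::Unsupported;
    }

    unsigned int concatInnerAxis = (numDimensions - concatAxis) - 1;
    if (concatInnerAxis < 3) // width, height or channels
    {
        return MergerSupport::AclConcat;
    }
    if (concatInnerAxis == 3) // batch: sub-tensors must share type space
    {
        return MergerSupport::SubTensorsOnly;
    }

    reasonIfUnsupported = "Maximum of 4 dimensions supported.";
    return MergerSupport::Unsupported;
}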
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 101e59d0c4..8db5f9ad84 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -61,6 +61,12 @@ std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITenso
coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
}
+ const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
+ if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
+ {
+ return nullptr;
+ }
+
return std::make_unique<NeonSubTensorHandle>(
boost::polymorphic_downcast<INeonTensorHandle*>(&parent), shape, coords);
}
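The guard added above refuses to create a sub-tensor handle that would not fit inside its parent. A plain-C++ sketch of the property being checked (the patch itself delegates this to arm_compute::error_on_invalid_subtensor and returns nullptr on failure):

#include <cstddef>
#include <vector>

// A sub-tensor is valid only if, in every dimension, its origin plus its
// extent stays within the parent's extent.
bool SubTensorFitsParent(const std::vector<std::size_t>& parentShape,
                         const std::vector<std::size_t>& origin,
                         const std::vector<std::size_t>& shape)
{
    if (origin.size() != parentShape.size() || shape.size() != parentShape.size())
    {
        return false; // rank mismatch cannot describe a valid sub-tensor
    }
    for (std::size_t i = 0; i < parentShape.size(); ++i)
    {
        if (origin[i] + shape[i] > parentShape[i])
        {
            return false; // the sub-tensor spills past the parent in dim i
        }
    }
    return true;
}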
diff --git a/src/backends/neon/workloads/NeonMergerWorkload.cpp b/src/backends/neon/workloads/NeonMergerWorkload.cpp
index be096b4b25..64d4d93d97 100644
--- a/src/backends/neon/workloads/NeonMergerWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMergerWorkload.cpp
@@ -11,12 +11,20 @@
#include <backendsCommon/CpuTensorHandle.hpp>
#include <neon/NeonTensorHandle.hpp>
-#include <arm_compute/runtime/NEON/functions/NEConcatenateLayer.h>
+
namespace armnn
{
using namespace armcomputetensorutils;
+namespace
+{
+size_t CalcAxis(const armnn::MergerDescriptor& desc)
+{
+ return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1;
+}
+} //namespace
+
arm_compute::Status NeonMergerWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
const TensorInfo& output,
const MergerDescriptor& descriptor)
@@ -25,60 +33,66 @@ arm_compute::Status NeonMergerWorkloadValidate(const std::vector<const TensorInf
std::vector<arm_compute::TensorInfo> aclInputs;
for (const TensorInfo* input : inputs)
{
- arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW);
- aclInputs.emplace_back(aclInputInfo);
+ arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW);
+ aclInputs.emplace_back(aclInputInfo);
}
const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
- arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH;
-
std::vector<arm_compute::ITensorInfo*> aclInputPtrs;
for (arm_compute::ITensorInfo& input : aclInputs)
{
aclInputPtrs.emplace_back(&input);
}
+ size_t aclAxis = CalcAxis(descriptor);
return arm_compute::NEConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis);
-
}
NeonMergerWorkload::NeonMergerWorkload(
const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
: BaseWorkload<MergerQueueDescriptor>(descriptor, info)
{
- m_Execute = true;
+ bool allInputsAreSubtensors = true;
- unsigned int innerAxisOrder = descriptor.m_Parameters.GetNumDimensions() - descriptor.m_Parameters.GetConcatAxis();
+ // Check that all inputs are sub-tensors
+ for (auto input : descriptor.m_Inputs)
+ {
+ if (!input->GetParent())
+ {
+ // Non sub-tensor input found so we need to execute the merger function
+ allInputsAreSubtensors = false;
+ break;
+ }
+ }
- if (innerAxisOrder != 1)
+ if (allInputsAreSubtensors)
{
- m_Execute = false;
+ // Can skip configuring the merger function since it's not executed
return;
}
std::vector<arm_compute::ITensor *> aclInputs;
- arm_compute::DataLayout aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW);
for (auto input : m_Data.m_Inputs)
{
arm_compute::ITensor& aclInput = boost::polymorphic_pointer_downcast<INeonTensorHandle>(input)->GetTensor();
- aclInput.info()->set_data_layout(aclDataLayout);
aclInputs.emplace_back(&aclInput);
}
arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<INeonTensorHandle>(
- m_Data.m_Outputs[0])->GetTensor();
- output.info()->set_data_layout(aclDataLayout);
+ m_Data.m_Outputs[0])->GetTensor();
- arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH;
+ // Create the layer function
+ m_Layer.reset(new arm_compute::NEConcatenateLayer());
- auto layer = std::make_unique<arm_compute::NEConcatenateLayer>();
- layer->configure(aclInputs, &output, aclAxis);
- m_Layer.reset(layer.release());
+ // Configure input and output tensors
+ size_t aclAxis = CalcAxis(descriptor.m_Parameters);
+ m_Layer->configure(aclInputs, &output, aclAxis);
+ // Prepare
m_Layer->prepare();
}
void NeonMergerWorkload::Execute() const
{
- if (m_Execute)
+ if (m_Layer)
{
ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonMergerWorkload_Execute");
m_Layer->run();
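The constructor and Execute() changes above boil down to one pattern: the concatenation function is created and configured only when at least one input is not a sub-tensor, and Execute() keys off the pointer's null state rather than a separate flag. A minimal sketch with hypothetical Handle and ConcatFn stand-ins:

#include <memory>
#include <vector>

struct Handle { Handle* parent = nullptr; }; // stand-in tensor handle
struct ConcatFn { void run() {} };           // stand-in for NEConcatenateLayer

class MergerWorkload
{
public:
    explicit MergerWorkload(const std::vector<Handle*>& inputs)
    {
        bool allInputsAreSubtensors = true;
        for (Handle* input : inputs)
        {
            if (input->parent == nullptr) // non sub-tensor input found
            {
                allInputsAreSubtensors = false;
                break;
            }
        }

        if (allInputsAreSubtensors)
        {
            // The sub-tensors already alias the output's memory, so there
            // is nothing to configure and Execute() becomes a no-op.
            return;
        }

        m_Layer = std::make_unique<ConcatFn>(); // configure() would go here
    }

    void Execute() const
    {
        if (m_Layer) // null means the sub-tensor optimization applied
        {
            m_Layer->run();
        }
    }

private:
    std::unique_ptr<ConcatFn> m_Layer;
};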
diff --git a/src/backends/neon/workloads/NeonMergerWorkload.hpp b/src/backends/neon/workloads/NeonMergerWorkload.hpp
index 3432c626cb..1dd9309aff 100644
--- a/src/backends/neon/workloads/NeonMergerWorkload.hpp
+++ b/src/backends/neon/workloads/NeonMergerWorkload.hpp
@@ -9,7 +9,8 @@
#include <arm_compute/core/Error.h>
#include <arm_compute/runtime/IFunction.h>
-#
+#include <arm_compute/runtime/NEON/functions/NEConcatenateLayer.h>
+
#include <memory>
namespace armnn
@@ -27,9 +28,7 @@ public:
void Execute() const override;
private:
- std::unique_ptr<arm_compute::IFunction> m_Layer;
- bool m_Execute;
-
+ std::unique_ptr<arm_compute::NEConcatenateLayer> m_Layer;
};
} //namespace armnn
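One design note on the header change: typing m_Layer as the concrete arm_compute::NEConcatenateLayer instead of arm_compute::IFunction lets the unique_ptr's null state stand in for the removed m_Execute flag, leaving the workload with a single source of truth for whether there is anything to run.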