From 8599a415c159aa867db12853b3195f0f0a51ee6b Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Mon, 19 Nov 2018 14:51:07 +0000
Subject: IVGCVSW-2043 - Merger using ACL for innermost concat axis

* Add ClMergerWorkload and NeonMergerWorkload to call ACL for the
  innermost concat axis

* Modify layer support to call ClMergerWorkloadValidate and
  NeonMergerWorkloadValidate when the concat axis is innermost

* Add m_ConcatAxis to MergerDescriptor

* Modify MergerQueueDescriptor::Validate to check sub-tensors only when
  sub-tensors are used

!android-nn-driver:166

Change-Id: I56676b43964c8d6d726387b41b3cc34a512c0f0a
---
 src/backends/neon/workloads/CMakeLists.txt         |  1 +
 src/backends/neon/workloads/NeonMergerWorkload.cpp | 84 ++++++++++++++++++++++
 src/backends/neon/workloads/NeonMergerWorkload.hpp | 16 +++--
 3 files changed, 97 insertions(+), 4 deletions(-)
 create mode 100644 src/backends/neon/workloads/NeonMergerWorkload.cpp

(limited to 'src/backends/neon/workloads')

diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index f6e8d1c04a..e383b04f25 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -28,6 +28,7 @@ list(APPEND armnnNeonBackendWorkloads_sources
     NeonL2NormalizationFloatWorkload.hpp
     NeonLstmFloatWorkload.cpp
     NeonLstmFloatWorkload.hpp
+    NeonMergerWorkload.cpp
     NeonMergerWorkload.hpp
     NeonMultiplicationFloatWorkload.cpp
     NeonMultiplicationFloatWorkload.hpp
diff --git a/src/backends/neon/workloads/NeonMergerWorkload.cpp b/src/backends/neon/workloads/NeonMergerWorkload.cpp
new file mode 100644
index 0000000000..f82e24453a
--- /dev/null
+++ b/src/backends/neon/workloads/NeonMergerWorkload.cpp
@@ -0,0 +1,84 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonMergerWorkload.hpp"
+#include <armnn/ArmNN.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <neon/NeonTensorHandle.hpp>
+
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+arm_compute::Status NeonMergerWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
+                                               const TensorInfo& output,
+                                               const MergerDescriptor& descriptor)
+
+{
+    std::vector<arm_compute::TensorInfo> aclInputs;
+    for (const TensorInfo* input : inputs)
+    {
+        arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW);
+        aclInputs.emplace_back(aclInputInfo);
+    }
+    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+    arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH;
+
+    std::vector<arm_compute::ITensorInfo*> aclInputPtrs;
+    for (arm_compute::ITensorInfo& input : aclInputs)
+    {
+        aclInputPtrs.emplace_back(&input);
+    }
+
+    return arm_compute::NEConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis);
+
+}
+
+NeonMergerWorkload::NeonMergerWorkload(
+const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
+        : BaseWorkload<MergerQueueDescriptor>(descriptor, info)
+{
+    m_Execute = true;
+
+    unsigned int innerAxisOrder = descriptor.m_Parameters.GetNumDimensions() - descriptor.m_Parameters.GetConcatAxis();
+
+    if (innerAxisOrder != 1)
+    {
+        m_Execute = false;
+        return;
+    }
+
+    std::vector<arm_compute::ITensor*> aclInputs;
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW);
+    for (auto input : m_Data.m_Inputs)
+    {
+        arm_compute::ITensor& aclInput = boost::polymorphic_pointer_downcast<INeonTensorHandle>(input)->GetTensor();
+        aclInput.info()->set_data_layout(aclDataLayout);
+        aclInputs.emplace_back(&aclInput);
+    }
+    arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<INeonTensorHandle>(
+                                                                       m_Data.m_Outputs[0])->GetTensor();
+    output.info()->set_data_layout(aclDataLayout);
+
+    arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH;
+
+    m_Layer.configure(aclInputs, &output, aclAxis);
+
+    m_Layer.prepare();
+}
+
+void NeonMergerWorkload::Execute() const
+{
+    if (m_Execute)
+    {
+        ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonMergerWorkload_Execute");
+        m_Layer.run();
+    }
+}
+
+} //namespace armnn
+
diff --git a/src/backends/neon/workloads/NeonMergerWorkload.hpp b/src/backends/neon/workloads/NeonMergerWorkload.hpp
index 7103d8a469..a4f36d18bc 100644
--- a/src/backends/neon/workloads/NeonMergerWorkload.hpp
+++ b/src/backends/neon/workloads/NeonMergerWorkload.hpp
@@ -6,18 +6,26 @@
 #pragma once
 
 #include <backendsCommon/Workload.hpp>
+#include <arm_compute/runtime/NEON/functions/NEConcatenateLayer.h>
 
 namespace armnn
 {
+arm_compute::Status NeonMergerWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
+                                               const TensorInfo& output,
+                                               const MergerDescriptor& descriptor);
+
 class NeonMergerWorkload : public BaseWorkload<MergerQueueDescriptor>
 {
 public:
+    NeonMergerWorkload(const MergerQueueDescriptor& descriptor, const WorkloadInfo& info);
+
     using BaseWorkload<MergerQueueDescriptor>::BaseWorkload;
+    void Execute() const override;
+
+private:
+    mutable arm_compute::NEConcatenateLayer m_Layer;
+    bool m_Execute;
 
-    virtual void Execute() const override
-    {
-        // With subtensors, merger is a no-op.
-    }
 };
 
 } //namespace armnn
-- 
cgit v1.2.1
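
A minimal sketch of how a caller might combine the innermost-axis check with the new NeonMergerWorkloadValidate before choosing the ACL concatenation path, assuming a hypothetical helper name (CanUseAclMerger); only the validate function and the descriptor accessors used in the workload above (GetNumDimensions, GetConcatAxis) are taken from the patch itself, and the real change wires this decision into the backend layer support rather than a free function.

    // Hypothetical helper, for illustration only -- not part of the patch above.
    // Mirrors the gating in NeonMergerWorkload: ACL's NEConcatenateLayer is only
    // used when the concatenation axis is the innermost dimension; otherwise the
    // existing sub-tensor (no-op) merger path is kept.
    #include "NeonMergerWorkload.hpp"

    bool CanUseAclMerger(const std::vector<const armnn::TensorInfo*>& inputs,
                         const armnn::TensorInfo& output,
                         const armnn::MergerDescriptor& descriptor)
    {
        if (descriptor.GetNumDimensions() - descriptor.GetConcatAxis() != 1)
        {
            return false; // concat axis is not innermost: fall back to sub-tensors
        }
        arm_compute::Status status = armnn::NeonMergerWorkloadValidate(inputs, output, descriptor);
        return status.error_code() == arm_compute::ErrorCode::OK;
    }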