diff options
author | Jim Flynn <jim.flynn@arm.com> | 2019-05-17 13:03:57 +0100 |
---|---|---|
committer | Derek Lamberti <derek.lamberti@arm.com> | 2019-05-20 08:00:07 +0000 |
commit | 6905941332e243c518bc30ed18c5a222fb50a90a (patch) | |
tree | 6e98379a891a3415fb788f966d74538c33a99447 /src/backends/cl/workloads/ClConcatWorkload.cpp | |
parent | 4ed34edd7fbc6569d7ba0bc7188efa287d42e239 (diff) | |
download | armnn-6905941332e243c518bc30ed18c5a222fb50a90a.tar.gz |
IVGCVSW-3117 Rename ClMergerWorkload to ClConcatWorkload
Change-Id: Ie0394336a772afa7b075eb562ac5191c8b3ec9f3
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
Diffstat (limited to 'src/backends/cl/workloads/ClConcatWorkload.cpp')
-rw-r--r-- | src/backends/cl/workloads/ClConcatWorkload.cpp | 100 |
1 file changed, 100 insertions, 0 deletions
diff --git a/src/backends/cl/workloads/ClConcatWorkload.cpp b/src/backends/cl/workloads/ClConcatWorkload.cpp new file mode 100644 index 0000000000..ee4ba6b65f --- /dev/null +++ b/src/backends/cl/workloads/ClConcatWorkload.cpp @@ -0,0 +1,100 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// +#include "ClConcatWorkload.hpp" +#include "ClWorkloadUtils.hpp" +#include <aclCommon/ArmComputeTensorUtils.hpp> +#include <backendsCommon/CpuTensorHandle.hpp> +#include <cl/ClTensorHandle.hpp> +#include <cl/ClLayerSupport.hpp> + +#include <arm_compute/core/Types.h> + +#include <boost/polymorphic_pointer_cast.hpp> + +namespace armnn +{ +using namespace armcomputetensorutils; + +namespace +{ +size_t CalcAxis(const MergerDescriptor& desc) +{ + return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1; +} +} //namespace + +arm_compute::Status ClConcatWorkloadValidate(const std::vector<const TensorInfo*>& inputs, + const TensorInfo& output, + const MergerDescriptor& descriptor) +{ + std::vector<arm_compute::TensorInfo> aclInputs; + for (const TensorInfo* input : inputs) + { + arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW); + aclInputs.emplace_back(aclInputInfo); + } + const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output); + std::vector<arm_compute::ITensorInfo*> aclInputPtrs; + for (arm_compute::ITensorInfo& input : aclInputs) + { + aclInputPtrs.emplace_back(&input); + } + + size_t aclAxis = CalcAxis(descriptor); + return arm_compute::CLConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis); +} + +ClConcatWorkload::ClConcatWorkload(const MergerQueueDescriptor& descriptor, const WorkloadInfo& info) +: BaseWorkload<MergerQueueDescriptor>(descriptor, info) +{ + bool allInputsAreSubtensors = true; + + // Check that all inputs are sub-tensors + for (auto input : descriptor.m_Inputs) + { + if (!input->GetParent()) + { + // Non sub-tensor input found so we 
need to execute the merger function + allInputsAreSubtensors = false; + break; + } + } + + if (allInputsAreSubtensors) + { + // Can skip configuring the merger function since it's not executed + return; + } + + std::vector<arm_compute::ICLTensor *> aclInputs; + for (auto input : m_Data.m_Inputs) + { + arm_compute::ICLTensor& aclInput = boost::polymorphic_pointer_downcast<IClTensorHandle>(input)->GetTensor(); + aclInputs.emplace_back(&aclInput); + } + arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>( + m_Data.m_Outputs[0])->GetTensor(); + + // Create the layer function + m_Layer.reset(new arm_compute::CLConcatenateLayer()); + + // Configure input and output tensors + size_t aclAxis = CalcAxis(descriptor.m_Parameters); + m_Layer->configure(aclInputs, &output, aclAxis); + + // Prepare + m_Layer->prepare(); +} + +void ClConcatWorkload::Execute() const +{ + if (m_Layer) + { + ARMNN_SCOPED_PROFILING_EVENT_CL("ClConcatWorkload_Execute"); + m_Layer->run(); + } +} + +} //namespace armnn
\ No newline at end of file |