From ec5f7d13582d1e477dc3473223b503388092a352 Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Fri, 22 Oct 2021 17:15:00 +0100
Subject: IVGCVSW-6170 Add CpuAcc Conv3d Workload

Signed-off-by: Teresa Charlin
Change-Id: I3a5bfef5a0085d172fd3689e67f25af909ace2ee
---
 src/backends/neon/workloads/CMakeLists.txt        |   2 +
 .../neon/workloads/NeonConvolution3dWorkload.cpp  | 116 +++++++++++++++++++++
 .../neon/workloads/NeonConvolution3dWorkload.hpp  |  43 ++++++++
 src/backends/neon/workloads/NeonWorkloads.hpp     |   1 +
 4 files changed, 162 insertions(+)
 create mode 100644 src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
 create mode 100644 src/backends/neon/workloads/NeonConvolution3dWorkload.hpp

(limited to 'src/backends/neon/workloads')

diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 6451f4c5d7..0c64a19bf9 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -36,6 +36,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
     NeonConvertFp32ToFp16Workload.hpp
     NeonConvolution2dWorkload.cpp
     NeonConvolution2dWorkload.hpp
+    NeonConvolution3dWorkload.cpp
+    NeonConvolution3dWorkload.hpp
     NeonDepthToSpaceWorkload.cpp
     NeonDepthToSpaceWorkload.hpp
     NeonDepthwiseConvolutionWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
new file mode 100644
index 0000000000..1891981162
--- /dev/null
+++ b/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
@@ -0,0 +1,116 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonConvolution3dWorkload.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <backendsCommon/TensorHandle.hpp>
+#include <neon/workloads/NeonWorkloadUtils.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NEConv3D.h>
+
+#include <armnn/Types.hpp>
+#include <Half.hpp>
+
+namespace armnn
+{
+
+using namespace armcomputetensorutils;
+
+arm_compute::Status NeonConvolution3dWorkloadValidate(const TensorInfo& input,
+                                                      const TensorInfo& output,
+                                                      const Convolution3dDescriptor& descriptor,
+                                                      const TensorInfo& weights,
+                                                      const Optional<TensorInfo>& biases,
+                                                      bool isFastMathEnabled,
+                                                      const ActivationDescriptor* activationDescriptor)
+{
+    const arm_compute::TensorInfo aclInputInfo   = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
+    arm_compute::TensorInfo aclBiasesInfo;
+    arm_compute::TensorInfo* optionalAclBiasesInfo = nullptr;
+    if (descriptor.m_BiasEnabled)
+    {
+        ARMNN_ASSERT(biases.has_value());
+
+        aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
+        optionalAclBiasesInfo = &aclBiasesInfo;
+    }
+    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+
+    const arm_compute::Conv3dInfo aclConv3DInfo = ComputeConv3DInfo(descriptor,
+                                                                    isFastMathEnabled,
+                                                                    activationDescriptor);
+
+    return arm_compute::NEConv3D::validate(&aclInputInfo,
+                                           &aclWeightsInfo,
+                                           optionalAclBiasesInfo,
+                                           &aclOutputInfo,
+                                           aclConv3DInfo);
+}
+
+NeonConvolution3dWorkload::NeonConvolution3dWorkload(const Convolution3dQueueDescriptor& descriptor,
+                                                     const WorkloadInfo& info,
+                                                     std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager,
+                                                     const bool isFastMathEnabled)
+    : BaseWorkload<Convolution3dQueueDescriptor>(descriptor, info)
+{
+    IgnoreUnused(memoryManager);
+
+    using arm_compute::NEConv3D;
+    uint32_t numInputs = m_Data.m_Parameters.m_BiasEnabled ? 3 : 2;
+    m_Data.ValidateInputsOutputs("NeonConvolution3dWorkload", numInputs, 1);
+
+    arm_compute::ITensor& input   = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& weights = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor* biasesPtr = nullptr;
+    if (m_Data.m_Parameters.m_BiasEnabled)
+    {
+        biasesPtr = &PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
+    }
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+    input.info()->set_data_layout(aclDataLayout);
+    weights.info()->set_data_layout(aclDataLayout);
+    output.info()->set_data_layout(aclDataLayout);
+
+    const arm_compute::Conv3dInfo aclConv3DInfo = ComputeConv3DInfo(descriptor, isFastMathEnabled);
+
+    auto convolutionLayer = std::make_unique<arm_compute::NEConv3D>();
+    convolutionLayer->configure(&input,
+                                &weights,
+                                biasesPtr,
+                                &output,
+                                aclConv3DInfo);
+
+    // Add details for profiling output
+    WorkloadInfo detailsInfo;
+
+    detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
+    detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
+
+    // Report Profiling Details
+    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonConvolution3dWorkload_Construct",
+                                         descriptor.m_Parameters,
+                                         detailsInfo,
+                                         this->GetGuid());
+
+    m_ConvolutionLayer.reset(convolutionLayer.release());
+
+    ARMNN_ASSERT(m_ConvolutionLayer);
+
+    m_ConvolutionLayer->prepare();
+}
+
+void NeonConvolution3dWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvolution3dWorkload_Execute", this->GetGuid());
+    m_ConvolutionLayer->run();
+}
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonConvolution3dWorkload.hpp b/src/backends/neon/workloads/NeonConvolution3dWorkload.hpp
new file mode 100644
index 0000000000..b5175e963f
--- /dev/null
+++ b/src/backends/neon/workloads/NeonConvolution3dWorkload.hpp
@@ -0,0 +1,43 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/IFunction.h>
+#include <arm_compute/runtime/Tensor.h>
+#include <arm_compute/runtime/MemoryManagerOnDemand.h>
+
+#include <memory>
+
+namespace armnn
+{
+
+arm_compute::Status NeonConvolution3dWorkloadValidate(const TensorInfo& input,
+                                                      const TensorInfo& output,
+                                                      const Convolution3dDescriptor& descriptor,
+                                                      const TensorInfo& weights,
+                                                      const Optional<TensorInfo>& biases,
+                                                      bool isFastMathEnabled = false,
+                                                      const ActivationDescriptor* activationDescriptor = nullptr);
+
+class NeonConvolution3dWorkload : public BaseWorkload<Convolution3dQueueDescriptor>
+{
+public:
+    using BaseWorkload<Convolution3dQueueDescriptor>::m_Data;
+
+    NeonConvolution3dWorkload(const Convolution3dQueueDescriptor& descriptor,
+                              const WorkloadInfo& info,
+                              std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager,
+                              const bool isFastMathEnabled = false);
+
+    void Execute() const override;
+
+private:
+    std::unique_ptr<arm_compute::IFunction> m_ConvolutionLayer;
+};
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 4d51d186e9..a8134a130b 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -20,6 +20,7 @@
 #include "NeonConvertFp32ToBf16Workload.hpp"
 #include "NeonConvertFp32ToFp16Workload.hpp"
 #include "NeonConvolution2dWorkload.hpp"
+#include "NeonConvolution3dWorkload.hpp"
 #include "NeonDepthToSpaceWorkload.hpp"
 #include "NeonDepthwiseConvolutionWorkload.hpp"
 #include "NeonDequantizeWorkload.hpp"
--
cgit v1.2.1
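
Usage note (not part of the patch): the NeonConvolution3dWorkloadValidate entry point added above can be called directly to probe whether a given Conv3d configuration is supported on the CpuAcc (Neon) backend before a workload is built. The sketch below is illustrative only; the tensor shapes, the [D, H, W, Cin, Cout] weight ordering, the internal include path for the workload header and the build setup are assumptions for the example, not something this commit defines.

// Illustrative sketch: probe CpuAcc Conv3d support via the new validate function.
// Shapes, weight ordering and include paths are assumptions made for this example.
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <neon/workloads/NeonConvolution3dWorkload.hpp> // internal backend header, assumed on the include path

#include <iostream>

int main()
{
    using namespace armnn;

    // NDHWC input: batch 1, depth 8, height 16, width 16, 3 channels (example values).
    TensorInfo input({1, 8, 16, 16, 3}, DataType::Float32);
    // Weights assumed here as [D, H, W, Cin, Cout]; check the backend's expected layout.
    TensorInfo weights({3, 3, 3, 3, 8}, DataType::Float32);
    // Padding of 1 with a 3x3x3 kernel and stride 1 keeps the spatial dimensions; 8 output channels.
    TensorInfo output({1, 8, 16, 16, 8}, DataType::Float32);

    Convolution3dDescriptor descriptor;
    descriptor.m_StrideX   = 1;
    descriptor.m_StrideY   = 1;
    descriptor.m_StrideZ   = 1;
    descriptor.m_PadLeft   = 1;
    descriptor.m_PadRight  = 1;
    descriptor.m_PadTop    = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PadFront  = 1;
    descriptor.m_PadBack   = 1;
    descriptor.m_BiasEnabled = false;
    descriptor.m_DataLayout  = DataLayout::NDHWC;

    // No bias in this configuration, so an empty Optional is passed for the bias TensorInfo.
    arm_compute::Status status = NeonConvolution3dWorkloadValidate(input,
                                                                   output,
                                                                   descriptor,
                                                                   weights,
                                                                   EmptyOptional(),
                                                                   /*isFastMathEnabled=*/ false,
                                                                   /*activationDescriptor=*/ nullptr);

    if (status.error_code() == arm_compute::ErrorCode::OK)
    {
        std::cout << "Conv3d configuration is supported on CpuAcc" << std::endl;
    }
    else
    {
        std::cout << "Not supported: " << status.error_description() << std::endl;
    }
    return 0;
}

In a real graph this check would normally be reached through the backend's layer support and workload factory rather than by including the workload header directly; the direct call here simply mirrors the signature introduced in the patch.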