ArmNN 23.02
NeonConvolution3dWorkload Class Reference

#include <NeonConvolution3dWorkload.hpp>

Inheritance diagram for NeonConvolution3dWorkload:
NeonConvolution3dWorkload → NeonBaseWorkload< Convolution3dQueueDescriptor > → BaseWorkload< Convolution3dQueueDescriptor > → IWorkload

Public Member Functions

 NeonConvolution3dWorkload (const Convolution3dQueueDescriptor &descriptor, const WorkloadInfo &info, std::shared_ptr< arm_compute::MemoryManagerOnDemand > &memoryManager, const bool isFastMathEnabled=false)
 
void Execute () const override
 
- Public Member Functions inherited from NeonBaseWorkload< Convolution3dQueueDescriptor >
 NeonBaseWorkload (const Convolution3dQueueDescriptor &descriptor, const WorkloadInfo &info)
 
void ReplaceInputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
 
void ReplaceOutputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
 
- Public Member Functions inherited from BaseWorkload< Convolution3dQueueDescriptor >
 BaseWorkload (const Convolution3dQueueDescriptor &descriptor, const WorkloadInfo &info)
 
void ExecuteAsync (ExecutionData &executionData) override
 
void PostAllocationConfigure () override
 
const Convolution3dQueueDescriptor & GetData () const
 
arm::pipe::ProfilingGuid GetGuid () const final
 
virtual bool SupportsTensorHandleReplacement () const override
 
- Public Member Functions inherited from IWorkload
virtual ~IWorkload ()
 
virtual arm::pipe::ProfilingGuid GetGuid () const =0
 
virtual bool SupportsTensorHandleReplacement () const =0
 
virtual void RegisterDebugCallback (const DebugCallbackFunction &)
 
virtual armnn::Optional< armnn::MemoryRequirements > GetMemoryRequirements ()
 
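As a usage illustration of the inherited handle-replacement members listed above, here is a minimal hedged sketch. The workload and the replacement handles are assumed to be Neon-compatible objects created elsewhere; the function and parameter names are illustrative, not part of the ArmNN API.

// Hedged sketch: swap the tensors bound to slot 0, e.g. after importing external memory.
void SwapHandles(armnn::NeonConvolution3dWorkload& workload,
                 armnn::ITensorHandle* newInputHandle,
                 armnn::ITensorHandle* newOutputHandle)
{
    if (workload.SupportsTensorHandleReplacement())
    {
        workload.ReplaceInputTensorHandle(newInputHandle, /*slot=*/0);
        workload.ReplaceOutputTensorHandle(newOutputHandle, /*slot=*/0);
    }
}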

Additional Inherited Members

- Protected Member Functions inherited from NeonBaseWorkload< Convolution3dQueueDescriptor >
virtual void Reconfigure ()
 
- Protected Attributes inherited from BaseWorkload< Convolution3dQueueDescriptor >
Convolution3dQueueDescriptor m_Data
 
const arm::pipe::ProfilingGuid m_Guid
 

Detailed Description

Definition at line 27 of file NeonConvolution3dWorkload.hpp.
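For orientation, a sketch of the class shape as declared in NeonConvolution3dWorkload.hpp, reconstructed from this page. The exact type of the private m_ConvolutionLayer member is an assumption: an arm_compute function object that the constructor configures and Execute() runs.

// Inside namespace armnn (sketch, not the verbatim header).
class NeonConvolution3dWorkload : public NeonBaseWorkload<Convolution3dQueueDescriptor>
{
public:
    NeonConvolution3dWorkload(const Convolution3dQueueDescriptor& descriptor,
                              const WorkloadInfo& info,
                              std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager,
                              const bool isFastMathEnabled = false);

    void Execute() const override;

private:
    // Assumed member: the configured arm_compute::NEConv3D, held through its IFunction interface.
    std::unique_ptr<arm_compute::IFunction> m_ConvolutionLayer;
};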

Constructor & Destructor Documentation

◆ NeonConvolution3dWorkload()

NeonConvolution3dWorkload ( const Convolution3dQueueDescriptor &  descriptor,
const WorkloadInfo &  info,
std::shared_ptr< arm_compute::MemoryManagerOnDemand > &  memoryManager,
const bool  isFastMathEnabled = false 
)

Definition at line 56 of file NeonConvolution3dWorkload.cpp.

60  : NeonBaseWorkload<Convolution3dQueueDescriptor>(descriptor, info)
61 {
62  IgnoreUnused(memoryManager);
63 
64  using arm_compute::NEConv3D;
65  uint32_t numInputs = m_Data.m_Parameters.m_BiasEnabled ? 3: 2;
66  m_Data.ValidateInputsOutputs("NeonConvolution3dWorkload", numInputs, 1);
67 
68  arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
69  arm_compute::ITensor& weights = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
70  arm_compute::ITensor* biasesPtr = nullptr;
71  if (m_Data.m_Parameters.m_BiasEnabled)
72  {
73  biasesPtr = &PolymorphicDowncast<IAclTensorHandle *>(m_Data.m_Inputs[2])->GetTensor();
74  }
75  arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
76 
77  arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
78  input.info()->set_data_layout(aclDataLayout);
79  weights.info()->set_data_layout(aclDataLayout);
80  output.info()->set_data_layout(aclDataLayout);
81 
82  const arm_compute::Conv3dInfo aclConv3DInfo = ComputeConv3DInfo(descriptor, isFastMathEnabled);
83 
84  auto convolutionLayer = std::make_unique<arm_compute::NEConv3D>();
85  convolutionLayer->configure(&input,
86  &weights,
87  biasesPtr,
88  &output,
89  aclConv3DInfo);
90 
91  // Add details for profiling output
92  WorkloadInfo detailsInfo;
93 
94  detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
95  detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
96 
97  // Report Profiling Details
98  ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonConvolution3dWorkload_Construct",
99  descriptor.m_Parameters,
100  detailsInfo,
101  this->GetGuid());
102 
103  m_ConvolutionLayer.reset(convolutionLayer.release());
104 
105  ARMNN_ASSERT(m_ConvolutionLayer);
106 
107  m_ConvolutionLayer->prepare();
108 }

References armnn::IgnoreUnused(), armnn::info, Convolution3dDescriptor::m_BiasEnabled, BaseWorkload< Convolution3dQueueDescriptor >::m_Data, QueueDescriptor::m_Inputs, QueueDescriptor::m_Outputs, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, and QueueDescriptor::ValidateInputsOutputs().
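To show how this constructor is typically driven, a hedged sketch follows. In normal use the Neon workload factory performs these steps; the tensor handles and TensorInfos passed in are assumed to be Neon (IAclTensorHandle-backed) objects prepared and allocated elsewhere, and the include paths follow the ArmNN source layout and may need adjusting.

#include <NeonConvolution3dWorkload.hpp>

#include <arm_compute/runtime/BlobLifetimeManager.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>
#include <arm_compute/runtime/PoolManager.h>

#include <memory>

// Hedged sketch: build the workload for a bias-less NDHWC 3D convolution.
std::unique_ptr<armnn::NeonConvolution3dWorkload> MakeConv3dWorkload(
    armnn::ITensorHandle* inputHandle,    // assumed pre-created Neon tensor handles
    armnn::ITensorHandle* weightsHandle,
    armnn::ITensorHandle* outputHandle,
    const armnn::TensorInfo& inputInfo,   // assumed matching TensorInfos
    const armnn::TensorInfo& weightsInfo,
    const armnn::TensorInfo& outputInfo)
{
    armnn::Convolution3dQueueDescriptor descriptor;
    descriptor.m_Parameters.m_BiasEnabled = false;                    // two inputs expected
    descriptor.m_Parameters.m_DataLayout  = armnn::DataLayout::NDHWC;
    descriptor.m_Inputs  = { inputHandle, weightsHandle };
    descriptor.m_Outputs = { outputHandle };

    armnn::WorkloadInfo info;
    info.m_InputTensorInfos  = { inputInfo, weightsInfo };
    info.m_OutputTensorInfos = { outputInfo };

    // Memory manager that the Neon backend would normally supply.
    auto lifetimeMgr   = std::make_shared<arm_compute::BlobLifetimeManager>();
    auto poolMgr       = std::make_shared<arm_compute::PoolManager>();
    auto memoryManager = std::make_shared<arm_compute::MemoryManagerOnDemand>(lifetimeMgr, poolMgr);

    return std::make_unique<armnn::NeonConvolution3dWorkload>(descriptor, info, memoryManager,
                                                              /*isFastMathEnabled=*/true);
}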

Member Function Documentation

◆ Execute()

void Execute ( ) const
override virtual

Implements IWorkload.

Definition at line 110 of file NeonConvolution3dWorkload.cpp.

111 {
112  ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvolution3dWorkload_Execute", this->GetGuid());
113  m_ConvolutionLayer->run();
114 }

References ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID, and BaseWorkload< Convolution3dQueueDescriptor >::GetGuid().
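For completeness, a hedged sketch of the runtime-side call: once the workload has been created (for example by the factory sketch above), execution is driven through the IWorkload interface.

// Hedged sketch: run the prepared workload once; Execute() emits the
// "NeonConvolution3dWorkload_Execute" profiling event and then runs NEConv3D.
void RunOnce(const std::unique_ptr<armnn::IWorkload>& workload)
{
    workload->Execute();
}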


The documentation for this class was generated from the following files:
src/backends/neon/workloads/NeonConvolution3dWorkload.hpp
src/backends/neon/workloads/NeonConvolution3dWorkload.cpp