ArmNN 22.02
NeonWorkloadFactory Class Reference

#include <NeonWorkloadFactory.hpp>

Inheritance diagram for NeonWorkloadFactory:
NeonWorkloadFactory → WorkloadFactoryBase → IWorkloadFactory

Public Member Functions

 NeonWorkloadFactory (const std::shared_ptr< NeonMemoryManager > &memoryManager)
 
 NeonWorkloadFactory (const std::shared_ptr< NeonMemoryManager > &memoryManager, const IBackendInternal::IBackendSpecificModelContextPtr &modelContextPtr)
 
const BackendId & GetBackendId () const override
 
bool SupportsSubTensors () const override
 
std::unique_ptr< ITensorHandle > CreateSubTensorHandle (ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, DataLayout dataLayout, const bool IsMemoryManaged=true) const override
 
std::unique_ptr< IWorkload > CreateWorkload (LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const override
 
- Public Member Functions inherited from WorkloadFactoryBase
std::unique_ptr< IWorkload > CreateInput (const InputQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateActivation (const ActivationQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateAddition (const AdditionQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateArgMinMax (const ArgMinMaxQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateBatchNormalization (const BatchNormalizationQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateBatchToSpaceNd (const BatchToSpaceNdQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateComparison (const ComparisonQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateConcat (const ConcatQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateConstant (const ConstantQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateConvertFp16ToFp32 (const ConvertFp16ToFp32QueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateConvertFp32ToFp16 (const ConvertFp32ToFp16QueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateConvolution2d (const Convolution2dQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateDebug (const DebugQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateDepthToSpace (const DepthToSpaceQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d (const DepthwiseConvolution2dQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateDequantize (const DequantizeQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateDetectionPostProcess (const DetectionPostProcessQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateDivision (const DivisionQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateElementwiseUnary (const ElementwiseUnaryQueueDescriptor &descriptor, const WorkloadInfo &info) const override
 
std::unique_ptr< IWorkload > CreateFakeQuantization (const FakeQuantizationQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateFloor (const FloorQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateFullyConnected (const FullyConnectedQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateGather (const GatherQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateInstanceNormalization (const InstanceNormalizationQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateL2Normalization (const L2NormalizationQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateLogSoftmax (const LogSoftmaxQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateLstm (const LstmQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateMaximum (const MaximumQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateMean (const MeanQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateMemCopy (const MemCopyQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateMemImport (const MemImportQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateMerge (const MergeQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateMinimum (const MinimumQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateMultiplication (const MultiplicationQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateNormalization (const NormalizationQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateOutput (const OutputQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreatePad (const PadQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreatePermute (const PermuteQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreatePooling2d (const Pooling2dQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreatePooling3d (const Pooling3dQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreatePreCompiled (const PreCompiledQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreatePrelu (const PreluQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateQuantize (const QuantizeQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateQuantizedLstm (const QuantizedLstmQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateRank (const RankQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateReshape (const ReshapeQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateResize (const ResizeQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateSlice (const SliceQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateSoftmax (const SoftmaxQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateSpaceToBatchNd (const SpaceToBatchNdQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateSpaceToDepth (const SpaceToDepthQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateSubtraction (const SubtractionQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateSplitter (const SplitterQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateStack (const StackQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateStridedSlice (const StridedSliceQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateSwitch (const SwitchQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateTranspose (const TransposeQueueDescriptor &, const WorkloadInfo &) const override
 
std::unique_ptr< IWorkload > CreateTransposeConvolution2d (const TransposeConvolution2dQueueDescriptor &, const WorkloadInfo &) const override
 
- Public Member Functions inherited from IWorkloadFactory
virtual ~IWorkloadFactory ()
 
virtual void AfterWorkloadsCreated ()
 

Static Public Member Functions

static bool IsLayerSupported (const Layer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
- Static Public Member Functions inherited from IWorkloadFactory
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 
static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported, const ModelOptions &modelOptions)
 

Detailed Description

Definition at line 20 of file NeonWorkloadFactory.hpp.

Constructor & Destructor Documentation

◆ NeonWorkloadFactory() [1/2]

NeonWorkloadFactory ( const std::shared_ptr< NeonMemoryManager > &  memoryManager)

Definition at line 73 of file NeonWorkloadFactory.cpp.

74  : m_MemoryManager(memoryManager), m_ModelContextPtr(IBackendInternal::IBackendSpecificModelContextPtr{})
75 {
76  SetNumberOfThreads();
77 }

◆ NeonWorkloadFactory() [2/2]

NeonWorkloadFactory ( const std::shared_ptr< NeonMemoryManager > &  memoryManager,
const IBackendInternal::IBackendSpecificModelContextPtr &  modelContextPtr 
)

Definition at line 79 of file NeonWorkloadFactory.cpp.

81  : m_MemoryManager(memoryManager), m_ModelContextPtr(modelContextPtr)
82 {
83  SetNumberOfThreads();
84 }
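As a rough usage sketch (not part of the generated documentation): the factory is normally created by the Neon backend itself, but it can be wired up directly along the lines below. The include paths, the Allocator-backed memory manager and the NeonBackendModelContext wrapping are assumptions modelled on how NeonBackend creates its factory, not taken from this page.

#include <neon/NeonWorkloadFactory.hpp>      // assumed include paths; adjust to your build setup
#include <neon/NeonBackendModelContext.hpp>
#include <arm_compute/runtime/Allocator.h>
#include <memory>

armnn::NeonWorkloadFactory MakeNeonFactory(const armnn::ModelOptions& modelOptions)
{
    using namespace armnn;

    // Memory manager backed by the Arm Compute Library allocator
    // (mirrors what the Neon backend does when it creates this factory).
    auto memoryManager = std::make_shared<NeonMemoryManager>(
        std::make_unique<arm_compute::Allocator>(),
        BaseMemoryManager::MemoryAffinity::Offset);

    // Second overload: backend-specific options (e.g. FastMathEnabled, NumberOfThreads)
    // are wrapped in a NeonBackendModelContext and passed as the model context.
    IBackendInternal::IBackendSpecificModelContextPtr modelContext =
        std::make_shared<NeonBackendModelContext>(modelOptions);

    return NeonWorkloadFactory(memoryManager, modelContext);
}

The first overload takes only the memory manager and, as the excerpt above shows, leaves the model context empty.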

Member Function Documentation

◆ CreateSubTensorHandle()

std::unique_ptr< ITensorHandle > CreateSubTensorHandle ( ITensorHandle &  parent,
TensorShape const &  subTensorShape,
unsigned int const *  subTensorOrigin 
) const
override virtual

Reimplemented from WorkloadFactoryBase.

Definition at line 86 of file NeonWorkloadFactory.cpp.

Referenced by NeonWorkloadFactory::SupportsSubTensors().

89 {
90  const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);
91 
92  arm_compute::Coordinates coords;
93  coords.set_num_dimensions(subTensorShape.GetNumDimensions());
94  for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
95  {
96  // Arm compute indexes tensor coords in reverse order.
97  unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
98  coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
99  }
100 
101  const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
102  if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
103  {
104  return nullptr;
105  }
106 
107  return std::make_unique<NeonSubTensorHandle>(
108  PolymorphicDowncast<IAclTensorHandle*>(&parent), shape, coords);
109 }
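A hedged usage sketch (shapes, origin values and the `factory` object are illustrative, carried over from the construction sketch above): create a parent handle, then ask for a view into it. As the excerpt shows, the call returns nullptr when Arm Compute rejects the requested sub-tensor.

armnn::TensorInfo parentInfo(armnn::TensorShape({ 1, 8, 8, 16 }), armnn::DataType::Float32);
std::unique_ptr<armnn::ITensorHandle> parent = factory.CreateTensorHandle(parentInfo);

if (factory.SupportsSubTensors())
{
    // A view covering the first 8 channels of the parent tensor (illustrative values).
    armnn::TensorShape subShape({ 1, 8, 8, 8 });
    const unsigned int subTensorOrigin[4] = { 0, 0, 0, 0 };

    // Returns nullptr if Arm Compute considers the sub-tensor invalid for this parent.
    std::unique_ptr<armnn::ITensorHandle> sub =
        factory.CreateSubTensorHandle(*parent, subShape, subTensorOrigin);
}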

◆ CreateTensorHandle() [1/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
const bool  IsMemoryManaged = true 
) const
override virtual

Reimplemented from WorkloadFactoryBase.

Definition at line 111 of file NeonWorkloadFactory.cpp.

Referenced by NeonWorkloadFactory::SupportsSubTensors(), and TEST_SUITE().

113 {
114  auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
115  if (IsMemoryManaged)
116  {
117  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
118  }
119  return tensorHandle;
120 }

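A brief sketch of the two ways this overload is used (the shape is illustrative; `factory` is the instance from the construction sketch above): with IsMemoryManaged left at true the handle is attached to the factory's inter-layer memory group, otherwise it stands alone and must be allocated explicitly.

armnn::TensorInfo info(armnn::TensorShape({ 1, 16, 16, 3 }), armnn::DataType::Float32);

auto managed   = factory.CreateTensorHandle(info);         // joins the inter-layer memory group
auto unmanaged = factory.CreateTensorHandle(info, false);  // standalone handle
unmanaged->Allocate();                                      // backs the unmanaged handle with its own memory
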
◆ CreateTensorHandle() [2/2]

std::unique_ptr< ITensorHandle > CreateTensorHandle ( const TensorInfo &  tensorInfo,
DataLayout  dataLayout,
const bool  IsMemoryManaged = true 
) const
override virtual

Reimplemented from WorkloadFactoryBase.

Definition at line 122 of file NeonWorkloadFactory.cpp.

125 {
126  auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
127  if (IsMemoryManaged)
128  {
129  tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
130  }
131  return tensorHandle;
132 }

◆ CreateWorkload()

std::unique_ptr< IWorkload > CreateWorkload ( LayerType  type,
const QueueDescriptor &  descriptor,
const WorkloadInfo &  info 
) const
override virtual

Reimplemented from WorkloadFactoryBase.

Definition at line 134 of file NeonWorkloadFactory.cpp.

References armnn::Abs, armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::Cast, armnn::ChannelShuffle, armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertBf16ToFp32, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToBf16, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, WorkloadFactoryBase::CreateActivation(), WorkloadFactoryBase::CreateAddition(), WorkloadFactoryBase::CreateArgMinMax(), WorkloadFactoryBase::CreateBatchNormalization(), WorkloadFactoryBase::CreateBatchToSpaceNd(), WorkloadFactoryBase::CreateComparison(), WorkloadFactoryBase::CreateConcat(), WorkloadFactoryBase::CreateConstant(), WorkloadFactoryBase::CreateConvertFp16ToFp32(), WorkloadFactoryBase::CreateConvertFp32ToFp16(), WorkloadFactoryBase::CreateConvolution2d(), WorkloadFactoryBase::CreateDebug(), WorkloadFactoryBase::CreateDepthToSpace(), WorkloadFactoryBase::CreateDepthwiseConvolution2d(), WorkloadFactoryBase::CreateDequantize(), WorkloadFactoryBase::CreateDetectionPostProcess(), WorkloadFactoryBase::CreateDivision(), WorkloadFactoryBase::CreateElementwiseUnary(), WorkloadFactoryBase::CreateFloor(), WorkloadFactoryBase::CreateFullyConnected(), WorkloadFactoryBase::CreateGather(), WorkloadFactoryBase::CreateInput(), WorkloadFactoryBase::CreateInstanceNormalization(), WorkloadFactoryBase::CreateL2Normalization(), WorkloadFactoryBase::CreateLogSoftmax(), WorkloadFactoryBase::CreateLstm(), WorkloadFactoryBase::CreateMaximum(), WorkloadFactoryBase::CreateMean(), WorkloadFactoryBase::CreateMemCopy(), WorkloadFactoryBase::CreateMemImport(), WorkloadFactoryBase::CreateMinimum(), WorkloadFactoryBase::CreateMultiplication(), WorkloadFactoryBase::CreateNormalization(), WorkloadFactoryBase::CreateOutput(), WorkloadFactoryBase::CreatePad(), WorkloadFactoryBase::CreatePermute(), WorkloadFactoryBase::CreatePooling2d(), WorkloadFactoryBase::CreatePreCompiled(), WorkloadFactoryBase::CreatePrelu(), WorkloadFactoryBase::CreateQuantize(), WorkloadFactoryBase::CreateQuantizedLstm(), WorkloadFactoryBase::CreateRank(), WorkloadFactoryBase::CreateReshape(), WorkloadFactoryBase::CreateResize(), WorkloadFactoryBase::CreateSlice(), WorkloadFactoryBase::CreateSoftmax(), WorkloadFactoryBase::CreateSpaceToBatchNd(), WorkloadFactoryBase::CreateSpaceToDepth(), WorkloadFactoryBase::CreateSplitter(), WorkloadFactoryBase::CreateStack(), WorkloadFactoryBase::CreateStridedSlice(), WorkloadFactoryBase::CreateSubtraction(), WorkloadFactoryBase::CreateTranspose(), WorkloadFactoryBase::CreateTransposeConvolution2d(), armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseUnary, armnn::Exp, armnn::Fill, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::info, armnn::Input, armnn::InstanceNormalization, NeonBackendModelContext::IsFastMathEnabled(), armnn::L2Normalization, armnn::Log, armnn::LogicalAnd, armnn::LogicalBinary, armnn::LogicalNot, armnn::LogicalOr, armnn::LogSoftmax, armnn::Lstm, QueueDescriptor::m_Inputs, ElementwiseUnaryDescriptor::m_Operation, LogicalBinaryDescriptor::m_Operation, QueueDescriptor::m_Outputs, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Minimum, armnn::Multiplication, armnn::Neg, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::PreCompiled, armnn::Prelu, armnn::QLstm, armnn::Quantize, armnn::QuantizedLstm, 
armnn::Rank, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::Rsqrt, armnn::Sin, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Transpose, and armnn::TransposeConvolution2d.

Referenced by NeonWorkloadFactory::SupportsSubTensors(), and TEST_SUITE().

137 {
138  switch(type)
139  {
140  case LayerType::Activation :
141  {
142  auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
143  return std::make_unique<NeonActivationWorkload>(*activationQueueDescriptor, info);
144  }
145  case LayerType::Addition :
146  {
147  auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
148  return std::make_unique<NeonAdditionWorkload>(*additionQueueDescriptor, info);
149  }
150  case LayerType::ArgMinMax :
151  {
152  auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
153  return std::make_unique<NeonArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
154  }
155  case LayerType::BatchNormalization :
156  {
157  auto batchNormalizationQueueDescriptor
158  = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
159  return std::make_unique<NeonBatchNormalizationWorkload>(*batchNormalizationQueueDescriptor, info);
160  }
161  case LayerType::BatchToSpaceNd :
162  {
163  auto batchToSpaceNdQueueDescriptor
164  = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
165  return std::make_unique<NeonBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
166  }
167  case LayerType::Cast :
168  {
169  auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
170  return std::make_unique<NeonCastWorkload>(*castQueueDescriptor, info);
171  }
172  case LayerType::ChannelShuffle :
173  {
174  auto channelShuffleQueueDescriptor = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
175  return std::make_unique<NeonChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
176  }
177  case LayerType::Comparison :
178  {
179  auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
180  return std::make_unique<NeonComparisonWorkload>(*comparisonQueueDescriptor, info);
181  }
182  case LayerType::Concat :
183  {
184  auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
185  return std::make_unique<NeonConcatWorkload>(*concatQueueDescriptor, info);
186  }
187  case LayerType::Constant :
188  {
189  auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
190  return std::make_unique<NeonConstantWorkload>(*constantQueueDescriptor, info);
191  }
192  case LayerType::ConvertBf16ToFp32 :
193  {
194  auto convertBf16ToFp32QueueDescriptor
195  = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
196  return std::make_unique<NeonConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
197  }
198  case LayerType::ConvertFp16ToFp32 :
199  {
200  auto convertFp16ToFp32QueueDescriptor
201  = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
202  return std::make_unique<NeonConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
203  }
204  case LayerType::ConvertFp32ToBf16 :
205  {
206  auto convertFp32ToBf16QueueDescriptor
207  = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
208  return std::make_unique<NeonConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
209  }
210  case LayerType::ConvertFp32ToFp16 :
211  {
212  auto convertFp32ToFp16QueueDescriptor
213  = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
214  return std::make_unique<NeonConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
215  }
216  case LayerType::Convolution2d :
217  {
218  auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
219 
220  bool isFastMathEnabled = false;
221  if (m_ModelContextPtr)
222  {
223  if (m_ModelContextPtr.get() != nullptr)
224  {
225  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
226  if (modelOptions)
227  {
228  isFastMathEnabled = modelOptions->IsFastMathEnabled();
229  }
230  }
231  }
232  return std::make_unique<NeonConvolution2dWorkload>(*convolution2dQueueDescriptor,
233  info,
234  m_MemoryManager->GetIntraLayerManager(),
235  isFastMathEnabled);
236  }
237  case LayerType::Convolution3d :
238  {
239  auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
240 
241  bool isFastMathEnabled = false;
242  if (m_ModelContextPtr)
243  {
244  if (m_ModelContextPtr.get() != nullptr)
245  {
246  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
247  if (modelOptions)
248  {
249  isFastMathEnabled = modelOptions->IsFastMathEnabled();
250  }
251  }
252  }
253  return std::make_unique<NeonConvolution3dWorkload>(*convolution3dQueueDescriptor,
254  info,
255  m_MemoryManager->GetIntraLayerManager(),
256  isFastMathEnabled);
257  }
258  case LayerType::Debug :
259  {
260  auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
261  return MakeWorkloadHelper<NullWorkload, NullWorkload>(*debugQueueDescriptor, info);
262  }
263  case LayerType::DepthToSpace :
264  {
265  auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
266  return std::make_unique<NeonDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
267  }
268  case LayerType::DepthwiseConvolution2d :
269  {
270  auto depthwiseConvolution2dQueueDescriptor
271  = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
272  return std::make_unique<NeonDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor, info);
273  }
274  case LayerType::Dequantize :
275  {
276  auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
277  return std::make_unique<NeonDequantizeWorkload>(*dequantizeQueueDescriptor, info);
278  }
279  case LayerType::DetectionPostProcess :
280  {
281  auto detectionPostProcessQueueDescriptor
282  = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
283  return MakeWorkloadHelper<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor, info);
284  }
285  case LayerType::Division :
286  {
287  auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
288  return std::make_unique<NeonDivisionWorkload>(*divisionQueueDescriptor, info);
289  }
290  case LayerType::ElementwiseUnary :
291  {
292  auto elementwiseUnaryQueueDescriptor
293  = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
294 
295  switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
296  {
297  case UnaryOperation::Abs:
298  {
299  AbsQueueDescriptor absQueueDescriptor;
300  absQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
301  absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
302 
303  return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
304  }
305  case UnaryOperation::Exp:
306  return std::make_unique<NeonExpWorkload>(*elementwiseUnaryQueueDescriptor, info);
307  case UnaryOperation::LogicalNot:
308  return std::make_unique<NeonLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor, info);
309  case UnaryOperation::Log:
310  return std::make_unique<NeonLogWorkload>(*elementwiseUnaryQueueDescriptor, info);
311  case UnaryOperation::Neg:
312  return std::make_unique<NeonNegWorkload>(*elementwiseUnaryQueueDescriptor, info);
313  case UnaryOperation::Rsqrt:
314  {
315  RsqrtQueueDescriptor rsqrtQueueDescriptor;
316  rsqrtQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
317  rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
318 
319  return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
320  }
321  case UnaryOperation::Sin:
322  return std::make_unique<NeonSinWorkload>(*elementwiseUnaryQueueDescriptor, info);
323  default:
324  return nullptr;
325  }
326  }
327  case LayerType::Fill :
328  {
329  auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
330  return std::make_unique<NeonFillWorkload>(*fillQueueDescriptor, info);
331  }
332  case LayerType::Floor :
333  {
334  auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
335  return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info);
336  }
337  case LayerType::FullyConnected :
338  {
339  auto fullyConnectedQueueDescriptor = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
340  return std::make_unique<NeonFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
341  info,
342  m_MemoryManager->GetIntraLayerManager());
343  }
344  case LayerType::Gather :
345  {
346  auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
347  return std::make_unique<NeonGatherWorkload>(*gatherQueueDescriptor, info);
348  }
349  case LayerType::Input :
350  {
351  auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
352  return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
353  }
354  case LayerType::InstanceNormalization :
355  {
356  auto instanceNormalizationQueueDescriptor
357  = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
358  return std::make_unique<NeonInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
359  }
360  case LayerType::L2Normalization :
361  {
362  auto l2NormalizationQueueDescriptor
363  = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
364  return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>
365  (*l2NormalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
366  }
367  case LayerType::LogSoftmax :
368  {
369  auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
370  return std::make_unique<NeonLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
371  info,
372  m_MemoryManager->GetIntraLayerManager());
373  }
374  case LayerType::LogicalBinary :
375  {
376  auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
377 
378  switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
379  {
380  case LogicalBinaryOperation::LogicalAnd:
381  return std::make_unique<NeonLogicalAndWorkload>(*logicalBinaryQueueDescriptor, info);
382  case LogicalBinaryOperation::LogicalOr:
383  return std::make_unique<NeonLogicalOrWorkload>(*logicalBinaryQueueDescriptor, info);
384  default:
385  return nullptr;
386  }
387  }
388  case LayerType::Lstm :
389  {
390  auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
391  return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info);
392  }
393  case LayerType::Maximum :
394  {
395  auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
396  return std::make_unique<NeonMaximumWorkload>(*maximumQueueDescriptor, info);
397  }
398  case LayerType::Mean :
399  {
400  auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
401  return std::make_unique<NeonMeanWorkload>(*meanQueueDescriptor, info);
402  }
403  case LayerType::MemCopy :
404  {
405  auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
406  if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
407  {
408  throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
409  }
410  return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
411  }
412  case LayerType::MemImport :
413  {
414  auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
415  if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
416  {
417  throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemImport workload");
418  }
419  return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
420  }
421  case LayerType::Minimum :
422  {
423  auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
424  return std::make_unique<NeonMinimumWorkload>(*minimumQueueDescriptor, info);
425  }
426  case LayerType::Multiplication :
427  {
428  auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
429  return std::make_unique<NeonMultiplicationWorkload>(*multiplicationQueueDescriptor, info);
430  }
431  case LayerType::Normalization :
432  {
433  auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
434  return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>
435  (*normalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
436  }
437  case LayerType::Output :
438  {
439  auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
440  return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
441  }
442  case LayerType::Pad :
443  {
444  auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
445  return std::make_unique<NeonPadWorkload>(*padQueueDescriptor, info);
446  }
447  case LayerType::Permute :
448  {
449  auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
450  return std::make_unique<NeonPermuteWorkload>(*permuteQueueDescriptor, info);
451  }
452  case LayerType::Pooling2d :
453  {
454  auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
455  return std::make_unique<NeonPooling2dWorkload>(*pooling2dQueueDescriptor, info);
456  }
457  case LayerType::PreCompiled :
458  {
459  auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
460  return MakeWorkloadHelper<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info);
461  }
462  case LayerType::Prelu :
463  {
464  auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
465  return std::make_unique<NeonPreluWorkload>(*preluQueueDescriptor, info);
466  }
467  case LayerType::QLstm :
468  {
469  auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
470  return std::make_unique<NeonQLstmWorkload>(*qLstmQueueDescriptor, info);
471  }
472  case LayerType::Quantize :
473  {
474  auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
475  return std::make_unique<NeonQuantizeWorkload>(*quantizeQueueDescriptor, info);
476  }
477  case LayerType::QuantizedLstm :
478  {
479  auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
480  return std::make_unique<NeonQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info);
481  }
482  case LayerType::Rank :
483  {
484  auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
485  return std::make_unique<NeonRankWorkload>(*rankQueueDescriptor, info);
486  }
487  case LayerType::Reduce :
488  {
489  auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
490  return std::make_unique<NeonReduceWorkload>(*reduceQueueDescriptor, info);
491  }
492  case LayerType::Reshape :
493  {
494  auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
495  return std::make_unique<NeonReshapeWorkload>(*reshapeQueueDescriptor, info);
496  }
497  case LayerType::Resize :
498  {
499  auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
500  return std::make_unique<NeonResizeWorkload>(*resizeQueueDescriptor, info);
501  }
502  case LayerType::Slice :
503  {
504  auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
505  return std::make_unique<NeonSliceWorkload>(*sliceQueueDescriptor, info);
506  }
507  case LayerType::Softmax :
508  {
509  auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
510  return std::make_unique<NeonSoftmaxWorkload>(*softmaxQueueDescriptor,
511  info,
512  m_MemoryManager->GetIntraLayerManager());
513  }
514  case LayerType::SpaceToBatchNd :
515  {
516  auto spaceToBatchNdQueueDescriptor
517  = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
518  return std::make_unique<NeonSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
519  }
520  case LayerType::SpaceToDepth :
521  {
522  auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
523  return std::make_unique<NeonSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
524  }
525  case LayerType::Splitter :
526  {
527  auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
528  return std::make_unique<NeonSplitterWorkload>(*splitterQueueDescriptor, info);
529  }
530  case LayerType::Stack :
531  {
532  auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
533  return std::make_unique<NeonStackWorkload>(*stackQueueDescriptor, info);
534  }
535  case LayerType::StridedSlice :
536  {
537  auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
538  return std::make_unique<NeonStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
539  }
540  case LayerType::Subtraction :
541  {
542  auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
543  return std::make_unique<NeonSubtractionWorkload>(*subtractionQueueDescriptor, info);
544  }
545  case LayerType::Transpose :
546  {
547  auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
548  return std::make_unique<NeonTransposeWorkload>(*transposeQueueDescriptor, info);
549  }
550  case LayerType::TransposeConvolution2d :
551  {
552  auto transposeConvolution2dQueueDescriptor
553  = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
554  return std::make_unique<NeonTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
555  info,
556  m_MemoryManager->GetIntraLayerManager());
557  }
558  default:
559  return nullptr;
560  }
561 }
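A hedged end-to-end sketch of this unified entry point (the descriptor wiring and tensor shapes are illustrative; in normal use ArmNN's runtime performs these steps when it loads a network): build a queue descriptor and matching WorkloadInfo, then dispatch by LayerType. The queue descriptor types come from ArmNN's backend workload headers.

armnn::TensorInfo ioInfo(armnn::TensorShape({ 1, 128 }), armnn::DataType::Float32);

// 'factory' is the instance from the construction sketch above.
auto input  = factory.CreateTensorHandle(ioInfo, false);
auto output = factory.CreateTensorHandle(ioInfo, false);
input->Allocate();
output->Allocate();

armnn::ActivationQueueDescriptor descriptor;
descriptor.m_Parameters.m_Function = armnn::ActivationFunction::ReLu;
descriptor.m_Inputs  = { input.get() };
descriptor.m_Outputs = { output.get() };

armnn::WorkloadInfo workloadInfo;
workloadInfo.m_InputTensorInfos  = { ioInfo };
workloadInfo.m_OutputTensorInfos = { ioInfo };

// Dispatches to the Activation case above and returns a NeonActivationWorkload;
// layer types the Neon backend does not handle return nullptr.
std::unique_ptr<armnn::IWorkload> workload =
    factory.CreateWorkload(armnn::LayerType::Activation, descriptor, workloadInfo);
workload->Execute();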

◆ GetBackendId()

const BackendId & GetBackendId ( ) const
override virtual

Implements IWorkloadFactory.

Definition at line 49 of file NeonWorkloadFactory.cpp.

References NeonBackendModelContext::GetNumberOfThreads().

50 {
51  return s_Id;
52 }

◆ IsLayerSupported() [1/2]

bool IsLayerSupported ( const Layer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported 
)
static

Definition at line 34 of file NeonWorkloadFactory.cpp.

References IWorkloadFactory::IsLayerSupported().

37 {
38  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
39 }

◆ IsLayerSupported() [2/2]

bool IsLayerSupported ( const IConnectableLayer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported,
const ModelOptions &  modelOptions 
)
static

Definition at line 41 of file NeonWorkloadFactory.cpp.

References IWorkloadFactory::IsLayerSupported().

45 {
46  return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
47 }
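A usage sketch for this IConnectableLayer overload (the network construction and tensor shapes are illustrative, and the relevant armnn headers are assumed to be included): build a layer through the public graph API, give its slots tensor infos, then query Neon support.

armnn::INetworkPtr network = armnn::INetwork::Create();

armnn::ActivationDescriptor actDesc;
actDesc.m_Function = armnn::ActivationFunction::ReLu;

armnn::IConnectableLayer* input = network->AddInputLayer(0);
armnn::IConnectableLayer* relu  = network->AddActivationLayer(actDesc, "relu");

armnn::TensorInfo tensorInfo(armnn::TensorShape({ 1, 128 }), armnn::DataType::Float32);
input->GetOutputSlot(0).SetTensorInfo(tensorInfo);
input->GetOutputSlot(0).Connect(relu->GetInputSlot(0));
relu->GetOutputSlot(0).SetTensorInfo(tensorInfo);

std::string reason;
bool supported = armnn::NeonWorkloadFactory::IsLayerSupported(
    *relu,
    armnn::Optional<armnn::DataType>(armnn::DataType::Float32),
    reason,
    armnn::ModelOptions{});   // no backend-specific options

// When 'supported' is false, 'reason' describes why the Neon backend rejected the layer.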

◆ SupportsSubTensors()

bool SupportsSubTensors ( ) const
inline override virtual

Reimplemented from WorkloadFactoryBase.

Definition at line 39 of file NeonWorkloadFactory.hpp.

References ARMNN_DEPRECATED_MSG, armnn::ARMNN_DEPRECATED_MSG_REMOVAL_DATE(), WorkloadFactoryBase::CreateActivation(), WorkloadFactoryBase::CreateAddition(), WorkloadFactoryBase::CreateArgMinMax(), WorkloadFactoryBase::CreateBatchNormalization(), WorkloadFactoryBase::CreateBatchToSpaceNd(), WorkloadFactoryBase::CreateComparison(), WorkloadFactoryBase::CreateConcat(), WorkloadFactoryBase::CreateConstant(), WorkloadFactoryBase::CreateConvertFp16ToFp32(), WorkloadFactoryBase::CreateConvertFp32ToFp16(), WorkloadFactoryBase::CreateConvolution2d(), WorkloadFactoryBase::CreateDebug(), WorkloadFactoryBase::CreateDepthToSpace(), WorkloadFactoryBase::CreateDepthwiseConvolution2d(), WorkloadFactoryBase::CreateDequantize(), WorkloadFactoryBase::CreateDetectionPostProcess(), WorkloadFactoryBase::CreateDivision(), WorkloadFactoryBase::CreateElementwiseUnary(), WorkloadFactoryBase::CreateFloor(), WorkloadFactoryBase::CreateFullyConnected(), WorkloadFactoryBase::CreateGather(), WorkloadFactoryBase::CreateInput(), WorkloadFactoryBase::CreateInstanceNormalization(), WorkloadFactoryBase::CreateL2Normalization(), WorkloadFactoryBase::CreateLogSoftmax(), WorkloadFactoryBase::CreateLstm(), WorkloadFactoryBase::CreateMaximum(), WorkloadFactoryBase::CreateMean(), WorkloadFactoryBase::CreateMemCopy(), WorkloadFactoryBase::CreateMemImport(), WorkloadFactoryBase::CreateMinimum(), WorkloadFactoryBase::CreateMultiplication(), WorkloadFactoryBase::CreateNormalization(), WorkloadFactoryBase::CreateOutput(), WorkloadFactoryBase::CreatePad(), WorkloadFactoryBase::CreatePermute(), WorkloadFactoryBase::CreatePooling2d(), WorkloadFactoryBase::CreatePreCompiled(), WorkloadFactoryBase::CreatePrelu(), WorkloadFactoryBase::CreateQuantize(), WorkloadFactoryBase::CreateQuantizedLstm(), WorkloadFactoryBase::CreateRank(), WorkloadFactoryBase::CreateReshape(), WorkloadFactoryBase::CreateResize(), WorkloadFactoryBase::CreateSlice(), WorkloadFactoryBase::CreateSoftmax(), WorkloadFactoryBase::CreateSpaceToBatchNd(), WorkloadFactoryBase::CreateSpaceToDepth(), WorkloadFactoryBase::CreateSplitter(), WorkloadFactoryBase::CreateStack(), WorkloadFactoryBase::CreateStridedSlice(), NeonWorkloadFactory::CreateSubTensorHandle(), WorkloadFactoryBase::CreateSubtraction(), NeonWorkloadFactory::CreateTensorHandle(), WorkloadFactoryBase::CreateTranspose(), WorkloadFactoryBase::CreateTransposeConvolution2d(), NeonWorkloadFactory::CreateWorkload(), armnn::Info, and armnn::info.

39 { return true; }

The documentation for this class was generated from the following files:

NeonWorkloadFactory.hpp
NeonWorkloadFactory.cpp