ArmNN
 20.02
NeonLayerSupport.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "NeonLayerSupport.hpp"
7 #include "NeonBackendId.hpp"
8 
9 #include <armnn/Descriptors.hpp>
10 #include <armnn/Tensor.hpp>
11 #include <armnn/Types.hpp>
13 
14 #include <InternalTypes.hpp>
15 #include <LayerSupportCommon.hpp>
17 
18 #if defined(ARMCOMPUTENEON_ENABLED)
62 #endif
63 
64 namespace armnn
65 {
66 
67 namespace
68 {
69 
70 template< typename ... Args>
71 bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
72 {
73  IgnoreUnused(reasonIfUnsupported, (args)...);
74 #if defined(ARMCOMPUTENEON_ENABLED)
75  return true;
76 #else
77  SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
78  return false;
79 #endif
80 }
81 
82 template<typename FloatFunc, typename Uint8Func, typename ... Params>
83 bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
84  DataType dataType,
85  FloatFunc floatFuncPtr,
86  Uint8Func uint8FuncPtr,
87  Params&&... params)
88 {
89  return IsNeonBackendSupported(reasonIfUnsupported) &&
90  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
91  dataType,
92  floatFuncPtr,
93  floatFuncPtr,
94  uint8FuncPtr,
95  &FalseFunc<>,
96  &FalseFunc<>,
97  std::forward<Params>(params)...);
98 }
99 
#if defined(ARMCOMPUTENEON_ENABLED)
// Runs an Arm Compute Library workload-validate function and translates its
// arm_compute::Status into the bool + optional reason string that the
// ILayerSupport-style queries in this file return.
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
 arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
 const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
 if (!supported && reasonIfUnsupported)
 {
 // Surface ACL's own error text to the caller when a reason slot was provided.
 reasonIfUnsupported.value() = aclStatus.error_description();
 }
 return supported;
}

// NOTE: this macro expands to a 'return' statement, so it must be the final
// statement of the IsXxxSupported member that invokes it.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
 return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
// Without NEON compiled in, every validate query reports unsupported and
// IsNeonBackendSupported fills in the reason string.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
 return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
119 } // anonymous namespace
120 
122  const TensorInfo& output,
123  Optional<std::string&> reasonIfUnsupported) const
124 {
126  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
127 }
128 
130  const TensorInfo& output,
131  const ActivationDescriptor& descriptor,
132  Optional<std::string&> reasonIfUnsupported) const
133 {
134  IgnoreUnused(descriptor);
136  reasonIfUnsupported,
137  input,
138  output,
139  descriptor);
140 }
141 
143  const TensorInfo& input1,
144  const TensorInfo& output,
145  Optional<std::string&> reasonIfUnsupported) const
146 {
148  reasonIfUnsupported,
149  input0,
150  input1,
151  output);
152 }
153 
155  const TensorInfo& output,
156  const ArgMinMaxDescriptor& descriptor,
157  Optional<std::string&> reasonIfUnsupported) const
158 {
160  reasonIfUnsupported,
161  input,
162  output,
163  descriptor);
164 }
165 
167  const TensorInfo& output,
168  const TensorInfo& mean,
169  const TensorInfo& var,
170  const TensorInfo& beta,
171  const TensorInfo& gamma,
172  const BatchNormalizationDescriptor& descriptor,
173  Optional<std::string&> reasonIfUnsupported) const
174 {
176  reasonIfUnsupported,
177  input,
178  output,
179  mean,
180  var,
181  beta,
182  gamma,
183  descriptor);
184 }
185 
187  const TensorInfo& output,
188  const BatchToSpaceNdDescriptor& descriptor,
189  Optional<std::string&> reasonIfUnsupported) const
190 {
192  reasonIfUnsupported,
193  input,
194  output,
195  descriptor);
196 }
197 
199  const TensorInfo& input1,
200  const TensorInfo& output,
201  const ComparisonDescriptor& descriptor,
202  Optional<std::string&> reasonIfUnsupported) const
203 {
204  if (descriptor.m_Operation == ComparisonOperation::Greater)
205  {
207  reasonIfUnsupported,
208  input0,
209  input1,
210  output);
211  }
212 
213  return false;
214 }
215 
// Checks whether the Neon backend can concatenate 'inputs' into 'output' along
// the axis requested by 'descriptor'. An axis at or beyond the tensor rank is
// rejected outright; inner axes (width/height/channels) are validated through
// ACL; a concat along the outermost (batch) axis is accepted as long as every
// input matches the output's type and quantization space; ranks above 4 are
// unsupported.
bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
 const TensorInfo& output,
 const ConcatDescriptor& descriptor,
 Optional<std::string&> reasonIfUnsupported) const
{
 if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
 {
 SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.")
 return false;
 }

 // Distance of the concat axis from the innermost dimension (0 = innermost).
 unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
 if(concatInnerAxis < 3) // Width, height, or channels
 {
 // NOTE(review): the FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
 // ...) invocation line appears to have been lost in extraction here — the
 // argument list below belongs to it; confirm against the original source.
 reasonIfUnsupported,
 inputs,
 output,
 descriptor);
 }
 else if (concatInnerAxis == 3)
 {
 for (auto& input : inputs)
 {
 if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
 {
 SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
 return false;
 }
 }
 return true; // Sub-tensors support concat along batch
 }
 else // > 4 dimensions not supported.
 {
 SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
 return false;
 }
}
254 
256  Optional<std::string&> reasonIfUnsupported) const
257 {
258  return IsSupportedForDataTypeNeon(reasonIfUnsupported,
259  output.GetDataType(),
260  &TrueFunc<>,
261  &TrueFunc<>);
262 }
263 
265  const TensorInfo& output,
266  Optional<std::string&> reasonIfUnsupported) const
267 {
268  armnn::IgnoreUnused(input);
269  armnn::IgnoreUnused(output);
270  armnn::IgnoreUnused(reasonIfUnsupported);
271  return true;
272 }
273 
275  const TensorInfo& output,
276  Optional<std::string&> reasonIfUnsupported) const
277 {
278  armnn::IgnoreUnused(input);
279  armnn::IgnoreUnused(output);
280  armnn::IgnoreUnused(reasonIfUnsupported);
281  return true;
282 }
283 
285  const TensorInfo& output,
286  const Convolution2dDescriptor& descriptor,
287  const TensorInfo& weights,
288  const Optional<TensorInfo>& biases,
289  Optional<std::string&> reasonIfUnsupported) const
290 {
292  reasonIfUnsupported,
293  input,
294  output,
295  descriptor,
296  weights,
297  biases);
298 }
299 
301  const TensorInfo& output,
302  const DepthToSpaceDescriptor& descriptor,
303  Optional<std::string&> reasonIfUnsupported) const
304 {
306  reasonIfUnsupported,
307  input,
308  output,
309  descriptor);
310 }
311 
313  const TensorInfo& output,
314  const DepthwiseConvolution2dDescriptor& descriptor,
315  const TensorInfo& weights,
316  const Optional<TensorInfo>& biases,
317  Optional<std::string&> reasonIfUnsupported) const
318 {
320  reasonIfUnsupported,
321  input,
322  output,
323  descriptor,
324  weights,
325  biases);
326 }
327 
329  const TensorInfo& output,
330  Optional<std::string&> reasonIfUnsupported) const
331 {
333  reasonIfUnsupported,
334  input,
335  output);
336 }
337 
339  const TensorInfo& output,
340  const DepthwiseConvolution2dDescriptor& descriptor,
341  const TensorInfo& weights,
342  const Optional<TensorInfo>& biases,
343  Optional<std::string&> reasonIfUnsupported) const
344 {
346  reasonIfUnsupported,
347  input,
348  output,
349  descriptor,
350  weights,
351  biases);
352 }
353 
355  const TensorInfo& output,
356  const ElementwiseUnaryDescriptor& descriptor,
357  Optional<std::string&> reasonIfUnsupported) const
358 {
359  if (descriptor.m_Operation == UnaryOperation::Abs)
360  {
362  reasonIfUnsupported,
363  input,
364  output);
365  }
366  else if (descriptor.m_Operation == UnaryOperation::Rsqrt)
367  {
369  reasonIfUnsupported,
370  input,
371  output);
372  }
373 
374  return false;
375 }
376 
378  const TensorInfo& output,
379  Optional<std::string&> reasonIfUnsupported) const
380 {
381  armnn::IgnoreUnused(output);
382  return IsNeonBackendSupported(reasonIfUnsupported) &&
383  IsSupportedForDataTypeGeneric(reasonIfUnsupported,
384  input.GetDataType(),
385  &FalseFuncF16<>,
386  &TrueFunc<>,
387  &FalseFuncU8<>,
388  &FalseFuncI32<>,
389  &FalseFuncU8<>);
390 }
391 
393  const TensorInfo& output,
394  const TensorInfo& weights,
395  const TensorInfo& biases,
396  const FullyConnectedDescriptor& descriptor,
397  Optional<std::string&> reasonIfUnsupported) const
398 {
400  reasonIfUnsupported,
401  input,
402  output,
403  weights,
404  biases,
405  descriptor);
406 }
407 
409  const armnn::TensorInfo& input1,
410  const armnn::TensorInfo& output,
411  armnn::Optional<std::string&> reasonIfUnsupported) const
412 {
414  return IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported);
415 }
416 
418  Optional<std::string&> reasonIfUnsupported) const
419 {
420  return IsNeonBackendSupported(reasonIfUnsupported, input);
421 }
422 
424  const TensorInfo& output,
425  const InstanceNormalizationDescriptor& descriptor,
426  Optional<std::string&> reasonIfUnsupported) const
427 {
429  reasonIfUnsupported,
430  input,
431  output,
432  descriptor);
433 }
434 
436  const TensorInfo& output,
437  const L2NormalizationDescriptor& descriptor,
438  Optional<std::string&> reasonIfUnsupported) const
439 {
440  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
441 }
442 
444  const TensorInfo& outputStateIn,
445  const TensorInfo& cellStateIn,
446  const TensorInfo& scratchBuffer,
447  const TensorInfo& outputStateOut,
448  const TensorInfo& cellStateOut,
449  const TensorInfo& output,
450  const LstmDescriptor& descriptor,
451  const LstmInputParamsInfo& paramsInfo,
452  Optional<std::string&> reasonIfUnsupported) const
453 {
455  reasonIfUnsupported,
456  input,
457  outputStateIn,
458  cellStateIn,
459  scratchBuffer,
460  outputStateOut,
461  cellStateOut,
462  output,
463  descriptor,
464  paramsInfo);
465 }
466 
468  const TensorInfo& input1,
469  const TensorInfo& output,
470  Optional<std::string&> reasonIfUnsupported) const
471 {
473  reasonIfUnsupported,
474  input0,
475  input1,
476  output);
477 }
478 
480  const TensorInfo& output,
481  const MeanDescriptor& descriptor,
482  Optional<std::string&> reasonIfUnsupported) const
483 {
485  reasonIfUnsupported,
486  input,
487  output,
488  descriptor);
489 }
490 
491 bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
492  const TensorInfo& output,
493  const MergerDescriptor& descriptor,
494  Optional<std::string&> reasonIfUnsupported) const
495 {
496  return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
497 }
498 
500  const TensorInfo& input1,
501  const TensorInfo& output,
502  Optional<std::string&> reasonIfUnsupported) const
503 {
505  reasonIfUnsupported,
506  input0,
507  input1,
508  output);
509 }
510 
512  const TensorInfo& input1,
513  const TensorInfo& output,
514  Optional<std::string&> reasonIfUnsupported) const
515 {
517  reasonIfUnsupported,
518  input0,
519  input1,
520  output);
521 }
522 
524  const TensorInfo& input1,
525  const TensorInfo& output,
526  Optional<std::string&> reasonIfUnsupported) const
527 {
529  reasonIfUnsupported,
530  input0,
531  input1,
532  output);
533 }
534 
536  const TensorInfo& output,
537  const NormalizationDescriptor& descriptor,
538  Optional<std::string&> reasonIfUnsupported) const
539 {
541  reasonIfUnsupported,
542  input,
543  output,
544  descriptor);
545 }
546 
548  Optional<std::string&> reasonIfUnsupported) const
549 {
550  return IsNeonBackendSupported(reasonIfUnsupported, output);
551 }
552 
554  const TensorInfo& output,
555  const PadDescriptor& descriptor,
556  Optional<std::string&> reasonIfUnsupported) const
557 {
559  reasonIfUnsupported,
560  input,
561  output,
562  descriptor);
563 }
564 
566  const TensorInfo& output,
567  const PermuteDescriptor& descriptor,
568  Optional<std::string&> reasonIfUnsupported) const
569 {
570  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
571 }
572 
574  const TensorInfo& output,
575  const Pooling2dDescriptor& descriptor,
576  Optional<std::string&> reasonIfUnsupported) const
577 {
578  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
579 }
580 
582  const armnn::TensorInfo &alpha,
583  const armnn::TensorInfo &output,
584  armnn::Optional<std::string &> reasonIfUnsupported) const
585 {
586  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
587 }
588 
590  const TensorInfo& output,
591  Optional<std::string&> reasonIfUnsupported) const
592 {
594  reasonIfUnsupported,
595  input,
596  output);
597 }
598 
600  const TensorInfo& cellStateIn,
601  const TensorInfo& outputStateIn,
602  const TensorInfo& cellStateOut,
603  const TensorInfo& outputStateOut,
604  const QuantizedLstmInputParamsInfo& paramsInfo,
605  Optional<std::string&> reasonIfUnsupported) const
606 {
608  reasonIfUnsupported,
609  input,
610  cellStateIn,
611  outputStateIn,
612  cellStateOut,
613  outputStateOut,
614  paramsInfo);
615 }
616 
618  const TensorInfo& output,
619  const ReshapeDescriptor& descriptor,
620  Optional<std::string&> reasonIfUnsupported) const
621 {
622  armnn::IgnoreUnused(descriptor);
624  reasonIfUnsupported,
625  input,
626  output);
627 }
628 
630  const TensorInfo& output,
631  const ResizeDescriptor& descriptor,
632  Optional<std::string&> reasonIfUnsupported) const
633 {
635  reasonIfUnsupported,
636  input,
637  output,
638  descriptor);
639 }
640 
642  const TensorInfo& output,
643  Optional<std::string&> reasonIfUnsupported) const
644 {
645  ResizeDescriptor descriptor;
646  descriptor.m_Method = ResizeMethod::Bilinear;
647  descriptor.m_DataLayout = DataLayout::NCHW;
648 
649  const TensorShape& outputShape = output.GetShape();
650  descriptor.m_TargetHeight = outputShape[2];
651  descriptor.m_TargetWidth = outputShape[3];
652 
653  return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
654 }
655 
657  const TensorInfo& output,
658  Optional<std::string&> reasonIfUnsupported) const
659 {
661  return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
662 }
663 
665  const TensorInfo& output,
666  const SliceDescriptor& descriptor,
667  Optional<std::string&> reasonIfUnsupported) const
668 {
670  reasonIfUnsupported,
671  input,
672  output,
673  descriptor);
674 }
675 
677  const TensorInfo& output,
678  const SoftmaxDescriptor& descriptor,
679  Optional<std::string&> reasonIfUnsupported) const
680 {
681  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
682 }
683 
685  const TensorInfo& output,
686  const SpaceToBatchNdDescriptor& descriptor,
687  Optional<std::string&> reasonIfUnsupported) const
688 {
690  reasonIfUnsupported,
691  input,
692  output,
693  descriptor);
694 }
695 
697  const TensorInfo& output,
698  const SpaceToDepthDescriptor& descriptor,
699  Optional<std::string&> reasonIfUnsupported) const
700 {
702  reasonIfUnsupported,
703  input,
704  output,
705  descriptor);
706 }
707 
709  const ViewsDescriptor& descriptor,
710  Optional<std::string&> reasonIfUnsupported) const
711 {
712  armnn::IgnoreUnused(descriptor);
713  return IsSupportedForDataTypeNeon(reasonIfUnsupported,
714  input.GetDataType(),
715  &TrueFunc<>,
716  &TrueFunc<>);
717 }
718 
720  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
721  const ViewsDescriptor& descriptor,
722  Optional<std::string&> reasonIfUnsupported) const
723 {
724 #if defined(ARMCOMPUTENEON_ENABLED)
725  // Split along the last dimension, cannot use sub-tensors
726  // as width and height of the sub-tensors do not match
727  // the width and height of the parent tensor
728  // in case of input with more than 2D.
729  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
730  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
731  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
732  {
734  reasonIfUnsupported,
735  input,
736  outputs,
737  *splitAxis.begin());
738  }
739 #endif
740  IgnoreUnused(descriptor);
741  for (auto output : outputs)
742  {
743  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
744  {
745  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
746  return false;
747  }
748  }
749  return true;
750 }
751 
752 bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
753  const TensorInfo& output,
754  const StackDescriptor& descriptor,
755  Optional<std::string&> reasonIfUnsupported) const
756 {
758  reasonIfUnsupported,
759  inputs,
760  output,
761  descriptor);
762 }
763 
765  const TensorInfo& output,
766  const StridedSliceDescriptor& descriptor,
767  Optional<std::string&> reasonIfUnsupported) const
768 {
770  reasonIfUnsupported,
771  input,
772  output,
773  descriptor);
774 }
775 
777  const TensorInfo& input1,
778  const TensorInfo& output,
779  Optional<std::string&> reasonIfUnsupported) const
780 {
782  reasonIfUnsupported,
783  input0,
784  input1,
785  output);
786 }
787 
789  const TensorInfo& output,
790  const TransposeConvolution2dDescriptor& descriptor,
791  const TensorInfo& weights,
792  const Optional<TensorInfo>& biases,
793  Optional<std::string&> reasonIfUnsupported) const
794 {
796  reasonIfUnsupported,
797  input,
798  output,
799  descriptor,
800  weights,
801  biases);
802 }
803 
805  const TensorInfo& output,
806  const TransposeDescriptor& descriptor,
807  Optional<std::string&> reasonIfUnsupported) const
808 {
809  FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
810 }
811 
812 } // namespace armnn
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
Definition: Descriptors.hpp:98
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ViewsDescriptor for the SplitterLayer.
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:218
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
A ReshapeDescriptor for the ReshapeLayer.
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reason=EmptyOptional()) const override
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t GetNumDimensions() const
Get the number of dimensions.
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor)
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:62
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
ISubgraphViewConverter supported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A Convolution2dDescriptor for the Convolution2dLayer.
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const ConcatDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSplitterSupported(const TensorInfo &input, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &desc)
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validate function for validating the inputs and output.
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
bool IsGreaterSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
bool IsResizeBilinearSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
Copyright (c) 2020 ARM Limited.
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
void IgnoreUnused(Ts &&...)
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A ResizeDescriptor for the ResizeLayer.
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
A StackDescriptor for the StackLayer.
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
A PadDescriptor for the PadLayer.
bool IsAbsSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataType
Definition: Types.hpp:32
An LstmDescriptor for the LstmLayer.
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A L2NormalizationDescriptor for the L2NormalizationLayer.
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:43
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
DataType GetDataType() const
Definition: Tensor.hpp:95
An OriginsDescriptor for the ConcatLayer.
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool IsMergerSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const MergerDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t m_TargetWidth
Target width value.
Status
enumeration
Definition: Types.hpp:26
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor)
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
uint32_t m_TargetHeight
Target height value.
A SliceDescriptor for the SliceLayer.
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
void SetValueChecked(Optional< T &> optionalRef, V &&val)
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
Definition: Descriptors.hpp:78
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &desc)
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:82
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
uint32_t GetNumDimensions() const
Get the number of dimensions.
A MeanDescriptor for the MeanLayer.
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
arm_compute::Status NeonGreaterWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)
A Pooling2dDescriptor for the Pooling2dLayer.
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)
A NormalizationDescriptor for the NormalizationLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
unsigned int GetConcatAxis() const
Get the concatenation axis value.
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
A SoftmaxDescriptor for the SoftmaxLayer.
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
bool IsRsqrtSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
A PermuteDescriptor for the PermuteLayer.
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override