From 6f92c8e9f8bb38dcf5dccf8deeff5112ecd8e37c Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Wed, 22 Nov 2023 11:41:15 +0000
Subject: Update Doxygen for 23.11

Signed-off-by: Nikhil Raj
Change-Id: I47cd933f5002cb94a73aa97689d7b3d9c93cb849
---
 latest/_backend_helper_8cpp_source.html | 2646 ++++++++++++++++----------------
 1 file changed, 1346 insertions(+), 1300 deletions(-)

(limited to 'latest/_backend_helper_8cpp_source.html')

diff --git a/latest/_backend_helper_8cpp_source.html b/latest/_backend_helper_8cpp_source.html
index e1f5bf377a..08fd8cec7b 100644
--- a/latest/_backend_helper_8cpp_source.html
+++ b/latest/_backend_helper_8cpp_source.html
@@ -36,7 +36,7 @@ ArmNN
-  23.08 +  23.11
@@ -344,44 +344,44 @@ $(document).ready(function(){initNavTree('_backend_helper_8cpp_source.html','');
246  reasonIfUnsupported);
247 }
248 
- -
250  const TensorInfo& output,
-
251  Optional<std::string&> reasonIfUnsupported)
-
252 {
-
253  TensorInfos infos{input, output};
-
254 
-
255  return m_LayerSupport->IsLayerSupported(LayerType::Cast,
-
256  infos,
-
257  BaseDescriptor(),
-
258  EmptyOptional(),
-
259  EmptyOptional(),
-
260  reasonIfUnsupported);
-
261 }
-
262 
- -
264  const TensorInfo &output,
-
265  const ChannelShuffleDescriptor &descriptor,
-
266  Optional<std::string &> reasonIfUnsupported)
-
267 {
-
268  TensorInfos infos{input, output};
-
269 
-
270  return m_LayerSupport->IsLayerSupported(LayerType::ChannelShuffle,
-
271  infos,
-
272  descriptor,
-
273  EmptyOptional(),
+
249 
+ +
251  const TensorInfo& output,
+
252  const armnn::BroadcastToDescriptor& descriptor,
+
253  Optional<std::string&> reasonIfUnsupported)
+
254 {
+
255  TensorInfos infos{input, output};
+
256 
+
257  return m_LayerSupport->IsLayerSupported(LayerType::BroadcastTo,
+
258  infos,
+
259  descriptor,
+
260  EmptyOptional(),
+
261  EmptyOptional(),
+
262  reasonIfUnsupported.value());
+
263 }
+
264 
+ +
266  const TensorInfo& output,
+
267  Optional<std::string&> reasonIfUnsupported)
+
268 {
+
269  TensorInfos infos{input, output};
+
270 
+
271  return m_LayerSupport->IsLayerSupported(LayerType::Cast,
+
272  infos,
+
273  BaseDescriptor(),
274  EmptyOptional(),
-
275  reasonIfUnsupported);
-
276 }
-
277 
- -
279  const TensorInfo& input1,
-
280  const TensorInfo& output,
-
281  const ComparisonDescriptor& descriptor,
-
282  Optional<std::string&> reasonIfUnsupported)
+
275  EmptyOptional(),
+
276  reasonIfUnsupported);
+
277 }
+
278 
+ +
280  const TensorInfo &output,
+
281  const ChannelShuffleDescriptor &descriptor,
+
282  Optional<std::string &> reasonIfUnsupported)
283 {
-
284  TensorInfos infos{input0, input1, output};
+
284  TensorInfos infos{input, output};
285 
-
286  return m_LayerSupport->IsLayerSupported(LayerType::Comparison,
+
286  return m_LayerSupport->IsLayerSupported(LayerType::ChannelShuffle,
287  infos,
288  descriptor,
289  EmptyOptional(),
@@ -389,472 +389,472 @@ $(document).ready(function(){initNavTree('_backend_helper_8cpp_source.html','');
291  reasonIfUnsupported);
292 }
293 
-
294 bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
-
295  const TensorInfo& output,
-
296  const OriginsDescriptor& descriptor,
-
297  Optional<std::string&> reasonIfUnsupported)
-
298 {
-
299  TensorInfos infos;
-
300  for (const TensorInfo* inputInfo : inputs)
-
301  {
-
302  infos.push_back(*inputInfo);
-
303  }
-
304  infos.push_back(output);
-
305 
-
306  return m_LayerSupport->IsLayerSupported(LayerType::Concat,
-
307  infos,
-
308  descriptor,
-
309  EmptyOptional(),
-
310  EmptyOptional(),
-
311  reasonIfUnsupported);
-
312 }
-
313 
- -
315  Optional<std::string&> reasonIfUnsupported)
-
316 {
-
317  TensorInfos infos{output};
-
318 
-
319  return m_LayerSupport->IsLayerSupported(LayerType::Constant,
-
320  infos,
-
321  BaseDescriptor(),
-
322  EmptyOptional(),
-
323  EmptyOptional(),
-
324  reasonIfUnsupported);
-
325 }
-
326 
- -
328  const TensorInfo& output,
-
329  Optional<std::string&> reasonIfUnsupported)
-
330 {
-
331  TensorInfos infos{input, output};
-
332 
-
333  return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp16ToFp32,
-
334  infos,
-
335  BaseDescriptor(),
-
336  EmptyOptional(),
-
337  EmptyOptional(),
-
338  reasonIfUnsupported);
-
339 }
-
340 
- -
342  const TensorInfo& output,
-
343  Optional<std::string&> reasonIfUnsupported)
-
344 {
-
345  TensorInfos infos{input, output};
-
346 
-
347  return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp32ToFp16,
-
348  infos,
-
349  BaseDescriptor(),
-
350  EmptyOptional(),
-
351  EmptyOptional(),
-
352  reasonIfUnsupported);
-
353 }
-
354 
- -
356  const TensorInfo& output,
-
357  const Convolution2dDescriptor& descriptor,
-
358  const TensorInfo& weights,
-
359  const Optional<TensorInfo>& biases,
-
360  Optional<std::string&> reasonIfUnsupported)
-
361 {
-
362  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
-
363  TensorInfos infos{input, output, weights, biasesVal};
-
364 
- -
366  if (!m_BackendId.IsUndefined())
-
367  {
-
368  capability = GetCapability("NonConstWeights", m_BackendId);
-
369  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
-
370  {
-
371  if (!weights.IsConstant())
-
372  {
-
373  if (reasonIfUnsupported.has_value())
-
374  {
-
375  reasonIfUnsupported.value() =
-
376  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
-
377  "Convolution2d weights are set as dynamic (non constant). ";
-
378  }
-
379  return false;
-
380  }
-
381  if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
-
382  {
-
383  if (reasonIfUnsupported.has_value())
-
384  {
-
385  reasonIfUnsupported.value() =
-
386  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
-
387  "Convolution2d biases are set as dynamic (non constant). ";
-
388  }
-
389  return false;
-
390  }
-
391 
-
392  // At the first stage we will only print a warning. this is to give
-
393  // backend developers a chance to adopt and read weights from input slots.
-
394  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
-
395  "If you are a backend developer please find more information in our "
-
396  "doxygen documentation on github https://github.com/ARM-software/armnn "
-
397  "under the keyword 'ConstTensorsAsInputs'.";
-
398  }
-
399  }
-
400 
-
401  return m_LayerSupport->IsLayerSupported(LayerType::Convolution2d,
-
402  infos,
-
403  descriptor,
-
404  EmptyOptional(),
-
405  EmptyOptional(),
-
406  reasonIfUnsupported);
-
407 }
-
408 
- -
410  const TensorInfo& output,
-
411  const Convolution3dDescriptor& descriptor,
-
412  const TensorInfo& weights,
-
413  const Optional<TensorInfo>& biases,
-
414  Optional<std::string&> reasonIfUnsupported)
-
415 {
-
416  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
-
417  TensorInfos infos{input, output, weights, biasesVal};
-
418 
-
419  return m_LayerSupport->IsLayerSupported(LayerType::Convolution3d,
-
420  infos,
-
421  descriptor,
-
422  EmptyOptional(),
-
423  EmptyOptional(),
-
424  reasonIfUnsupported);
-
425 }
-
426 
- -
428  const TensorInfo& output,
-
429  Optional<std::string&> reasonIfUnsupported)
-
430 {
-
431  TensorInfos infos{input, output};
-
432 
-
433  return m_LayerSupport->IsLayerSupported(LayerType::Debug,
-
434  infos,
-
435  BaseDescriptor(),
-
436  EmptyOptional(),
-
437  EmptyOptional(),
-
438  reasonIfUnsupported);
-
439 }
-
440 
- -
442  const TensorInfo& output,
-
443  const DepthToSpaceDescriptor& descriptor,
-
444  Optional<std::string&> reasonIfUnsupported)
-
445 {
-
446  TensorInfos infos{input, output};
-
447 
-
448  return m_LayerSupport->IsLayerSupported(LayerType::DepthToSpace,
-
449  infos,
-
450  descriptor,
-
451  EmptyOptional(),
+ +
295  const TensorInfo& input1,
+
296  const TensorInfo& output,
+
297  const ComparisonDescriptor& descriptor,
+
298  Optional<std::string&> reasonIfUnsupported)
+
299 {
+
300  TensorInfos infos{input0, input1, output};
+
301 
+
302  return m_LayerSupport->IsLayerSupported(LayerType::Comparison,
+
303  infos,
+
304  descriptor,
+
305  EmptyOptional(),
+
306  EmptyOptional(),
+
307  reasonIfUnsupported);
+
308 }
+
309 
+
310 bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+
311  const TensorInfo& output,
+
312  const OriginsDescriptor& descriptor,
+
313  Optional<std::string&> reasonIfUnsupported)
+
314 {
+
315  TensorInfos infos;
+
316  for (const TensorInfo* inputInfo : inputs)
+
317  {
+
318  infos.push_back(*inputInfo);
+
319  }
+
320  infos.push_back(output);
+
321 
+
322  return m_LayerSupport->IsLayerSupported(LayerType::Concat,
+
323  infos,
+
324  descriptor,
+
325  EmptyOptional(),
+
326  EmptyOptional(),
+
327  reasonIfUnsupported);
+
328 }
+
329 
+ +
331  Optional<std::string&> reasonIfUnsupported)
+
332 {
+
333  TensorInfos infos{output};
+
334 
+
335  return m_LayerSupport->IsLayerSupported(LayerType::Constant,
+
336  infos,
+
337  BaseDescriptor(),
+
338  EmptyOptional(),
+
339  EmptyOptional(),
+
340  reasonIfUnsupported);
+
341 }
+
342 
+ +
344  const TensorInfo& output,
+
345  Optional<std::string&> reasonIfUnsupported)
+
346 {
+
347  TensorInfos infos{input, output};
+
348 
+
349  return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp16ToFp32,
+
350  infos,
+
351  BaseDescriptor(),
+
352  EmptyOptional(),
+
353  EmptyOptional(),
+
354  reasonIfUnsupported);
+
355 }
+
356 
+ +
358  const TensorInfo& output,
+
359  Optional<std::string&> reasonIfUnsupported)
+
360 {
+
361  TensorInfos infos{input, output};
+
362 
+
363  return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp32ToFp16,
+
364  infos,
+
365  BaseDescriptor(),
+
366  EmptyOptional(),
+
367  EmptyOptional(),
+
368  reasonIfUnsupported);
+
369 }
+
370 
+ +
372  const TensorInfo& output,
+
373  const Convolution2dDescriptor& descriptor,
+
374  const TensorInfo& weights,
+
375  const Optional<TensorInfo>& biases,
+
376  Optional<std::string&> reasonIfUnsupported)
+
377 {
+
378  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
+
379  TensorInfos infos{input, output, weights, biasesVal};
+
380 
+ +
382  if (!m_BackendId.IsUndefined())
+
383  {
+
384  capability = GetCapability("NonConstWeights", m_BackendId);
+
385  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
+
386  {
+
387  if (!weights.IsConstant())
+
388  {
+
389  if (reasonIfUnsupported.has_value())
+
390  {
+
391  reasonIfUnsupported.value() =
+
392  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
+
393  "Convolution2d weights are set as dynamic (non constant). ";
+
394  }
+
395  return false;
+
396  }
+
397  if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
+
398  {
+
399  if (reasonIfUnsupported.has_value())
+
400  {
+
401  reasonIfUnsupported.value() =
+
402  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
+
403  "Convolution2d biases are set as dynamic (non constant). ";
+
404  }
+
405  return false;
+
406  }
+
407 
+
408  // At the first stage we will only print a warning. this is to give
+
409  // backend developers a chance to adopt and read weights from input slots.
+
410  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
+
411  "If you are a backend developer please find more information in our "
+
412  "doxygen documentation on github https://github.com/ARM-software/armnn "
+
413  "under the keyword 'ConstTensorsAsInputs'.";
+
414  }
+
415  }
+
416 
+
417  return m_LayerSupport->IsLayerSupported(LayerType::Convolution2d,
+
418  infos,
+
419  descriptor,
+
420  EmptyOptional(),
+
421  EmptyOptional(),
+
422  reasonIfUnsupported);
+
423 }
+
424 
+ +
426  const TensorInfo& output,
+
427  const Convolution3dDescriptor& descriptor,
+
428  const TensorInfo& weights,
+
429  const Optional<TensorInfo>& biases,
+
430  Optional<std::string&> reasonIfUnsupported)
+
431 {
+
432  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
+
433  TensorInfos infos{input, output, weights, biasesVal};
+
434 
+
435  return m_LayerSupport->IsLayerSupported(LayerType::Convolution3d,
+
436  infos,
+
437  descriptor,
+
438  EmptyOptional(),
+
439  EmptyOptional(),
+
440  reasonIfUnsupported);
+
441 }
+
442 
+ +
444  const TensorInfo& output,
+
445  Optional<std::string&> reasonIfUnsupported)
+
446 {
+
447  TensorInfos infos{input, output};
+
448 
+
449  return m_LayerSupport->IsLayerSupported(LayerType::Debug,
+
450  infos,
+
451  BaseDescriptor(),
452  EmptyOptional(),
-
453  reasonIfUnsupported);
-
454 }
-
455 
- -
457  const TensorInfo& input,
-
458  const TensorInfo& output,
-
459  const DepthwiseConvolution2dDescriptor& descriptor,
-
460  const TensorInfo& weights,
-
461  const Optional<TensorInfo>& biases,
-
462  Optional<std::string&> reasonIfUnsupported)
-
463 {
-
464  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
-
465  TensorInfos infos{input, output, weights, biasesVal};
-
466 
- -
468  if (!m_BackendId.IsUndefined())
-
469  {
-
470  capability = GetCapability("NonConstWeights", m_BackendId);
-
471  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
-
472  {
-
473  if (!weights.IsConstant())
-
474  {
-
475  if (reasonIfUnsupported.has_value())
-
476  {
-
477  reasonIfUnsupported.value() =
-
478  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
-
479  "DepthwiseConvolution2d weights are set as dynamic (non constant). ";
-
480  }
-
481  return false;
-
482  }
-
483  if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
-
484  {
-
485  if (reasonIfUnsupported.has_value())
-
486  {
-
487  reasonIfUnsupported.value() =
-
488  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
-
489  "DepthwiseConvolution2d biases are set as dynamic (non constant). ";
-
490  }
-
491  return false;
-
492  }
-
493  // At the first stage we will only print a warning. this is to give
-
494  // backend developers a chance to adopt and read weights from input slots.
-
495  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
-
496  "If you are a backend developer please find more information in our "
-
497  "doxygen documentation on github https://github.com/ARM-software/armnn "
-
498  "under the keyword 'ConstTensorsAsInputs'.";
-
499  }
-
500  }
-
501 
-
502  return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
-
503  infos,
-
504  descriptor,
-
505  EmptyOptional(),
-
506  EmptyOptional(),
-
507  reasonIfUnsupported);
-
508 }
-
509 
- -
511  const TensorInfo& output,
-
512  Optional<std::string&> reasonIfUnsupported)
-
513 {
-
514  TensorInfos infos{input, output};
-
515 
-
516  return m_LayerSupport->IsLayerSupported(LayerType::Dequantize,
-
517  infos,
-
518  BaseDescriptor(),
-
519  EmptyOptional(),
-
520  EmptyOptional(),
-
521  reasonIfUnsupported);
-
522 }
-
523 
- -
525  const TensorInfo& scores,
-
526  const TensorInfo& anchors,
-
527  const TensorInfo& detectionBoxes,
-
528  const TensorInfo& detectionClasses,
-
529  const TensorInfo& detectionScores,
-
530  const TensorInfo& numDetections,
-
531  const DetectionPostProcessDescriptor& descriptor,
-
532  Optional<std::string&> reasonIfUnsupported)
-
533 {
-
534  TensorInfos infos{boxEncodings, scores, anchors, detectionBoxes, detectionClasses, detectionScores, numDetections};
-
535 
-
536  return m_LayerSupport->IsLayerSupported(LayerType::DetectionPostProcess,
-
537  infos,
-
538  descriptor,
-
539  EmptyOptional(),
-
540  EmptyOptional(),
-
541  reasonIfUnsupported);
-
542 }
-
543 
- -
545  const TensorInfo& input,
-
546  const TensorInfo& output,
-
547  const DepthwiseConvolution2dDescriptor& descriptor,
-
548  const TensorInfo& weights,
-
549  const Optional<TensorInfo>& biases,
-
550  Optional<std::string&> reasonIfUnsupported)
-
551 {
-
552  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
-
553  TensorInfos infos{input, output, weights, biasesVal};
-
554 
- -
556  if (!m_BackendId.IsUndefined())
-
557  {
-
558  capability = GetCapability("NonConstWeights", m_BackendId);
-
559  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
-
560  {
-
561  if (!weights.IsConstant())
-
562  {
-
563  if (reasonIfUnsupported.has_value())
-
564  {
-
565  reasonIfUnsupported.value() =
-
566  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
-
567  "DilatedDepthwiseConvolution2d weights are set as dynamic (non constant). ";
-
568  }
-
569  return false;
-
570  }
-
571  if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
-
572  {
-
573  if (reasonIfUnsupported.has_value())
-
574  {
-
575  reasonIfUnsupported.value() =
-
576  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
-
577  "DilatedDepthwiseConvolution2d biases are set as dynamic (non constant). ";
-
578  }
-
579  return false;
-
580  }
-
581  // At the first stage we will only print a warning. this is to give
-
582  // backend developers a chance to adopt and read weights from input slots.
-
583  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
-
584  "If you are a backend developer please find more information in our "
-
585  "doxygen documentation on github https://github.com/ARM-software/armnn "
-
586  "under the keyword 'ConstTensorsAsInputs'.";
-
587  }
-
588  }
-
589 
-
590  return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
-
591  infos,
-
592  descriptor,
-
593  EmptyOptional(),
-
594  EmptyOptional(),
-
595  reasonIfUnsupported);
-
596 }
-
597 
- -
599  const TensorInfo& input1,
-
600  const TensorInfo& output,
-
601  Optional<std::string&> reasonIfUnsupported)
-
602 {
-
603  TensorInfos infos{input0, input1, output};
-
604 
-
605  return m_LayerSupport->IsLayerSupported(LayerType::Division,
-
606  infos,
-
607  BaseDescriptor(),
-
608  EmptyOptional(),
+
453  EmptyOptional(),
+
454  reasonIfUnsupported);
+
455 }
+
456 
+ +
458  const TensorInfo& output,
+
459  const DepthToSpaceDescriptor& descriptor,
+
460  Optional<std::string&> reasonIfUnsupported)
+
461 {
+
462  TensorInfos infos{input, output};
+
463 
+
464  return m_LayerSupport->IsLayerSupported(LayerType::DepthToSpace,
+
465  infos,
+
466  descriptor,
+
467  EmptyOptional(),
+
468  EmptyOptional(),
+
469  reasonIfUnsupported);
+
470 }
+
471 
+ +
473  const TensorInfo& input,
+
474  const TensorInfo& output,
+
475  const DepthwiseConvolution2dDescriptor& descriptor,
+
476  const TensorInfo& weights,
+
477  const Optional<TensorInfo>& biases,
+
478  Optional<std::string&> reasonIfUnsupported)
+
479 {
+
480  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
+
481  TensorInfos infos{input, output, weights, biasesVal};
+
482 
+ +
484  if (!m_BackendId.IsUndefined())
+
485  {
+
486  capability = GetCapability("NonConstWeights", m_BackendId);
+
487  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
+
488  {
+
489  if (!weights.IsConstant())
+
490  {
+
491  if (reasonIfUnsupported.has_value())
+
492  {
+
493  reasonIfUnsupported.value() =
+
494  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
+
495  "DepthwiseConvolution2d weights are set as dynamic (non constant). ";
+
496  }
+
497  return false;
+
498  }
+
499  if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
+
500  {
+
501  if (reasonIfUnsupported.has_value())
+
502  {
+
503  reasonIfUnsupported.value() =
+
504  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
+
505  "DepthwiseConvolution2d biases are set as dynamic (non constant). ";
+
506  }
+
507  return false;
+
508  }
+
509  // At the first stage we will only print a warning. this is to give
+
510  // backend developers a chance to adopt and read weights from input slots.
+
511  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
+
512  "If you are a backend developer please find more information in our "
+
513  "doxygen documentation on github https://github.com/ARM-software/armnn "
+
514  "under the keyword 'ConstTensorsAsInputs'.";
+
515  }
+
516  }
+
517 
+
518  return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
+
519  infos,
+
520  descriptor,
+
521  EmptyOptional(),
+
522  EmptyOptional(),
+
523  reasonIfUnsupported);
+
524 }
+
525 
+ +
527  const TensorInfo& output,
+
528  Optional<std::string&> reasonIfUnsupported)
+
529 {
+
530  TensorInfos infos{input, output};
+
531 
+
532  return m_LayerSupport->IsLayerSupported(LayerType::Dequantize,
+
533  infos,
+
534  BaseDescriptor(),
+
535  EmptyOptional(),
+
536  EmptyOptional(),
+
537  reasonIfUnsupported);
+
538 }
+
539 
+ +
541  const TensorInfo& scores,
+
542  const TensorInfo& anchors,
+
543  const TensorInfo& detectionBoxes,
+
544  const TensorInfo& detectionClasses,
+
545  const TensorInfo& detectionScores,
+
546  const TensorInfo& numDetections,
+
547  const DetectionPostProcessDescriptor& descriptor,
+
548  Optional<std::string&> reasonIfUnsupported)
+
549 {
+
550  TensorInfos infos{boxEncodings, scores, anchors, detectionBoxes, detectionClasses, detectionScores, numDetections};
+
551 
+
552  return m_LayerSupport->IsLayerSupported(LayerType::DetectionPostProcess,
+
553  infos,
+
554  descriptor,
+
555  EmptyOptional(),
+
556  EmptyOptional(),
+
557  reasonIfUnsupported);
+
558 }
+
559 
+ +
561  const TensorInfo& input,
+
562  const TensorInfo& output,
+
563  const DepthwiseConvolution2dDescriptor& descriptor,
+
564  const TensorInfo& weights,
+
565  const Optional<TensorInfo>& biases,
+
566  Optional<std::string&> reasonIfUnsupported)
+
567 {
+
568  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
+
569  TensorInfos infos{input, output, weights, biasesVal};
+
570 
+ +
572  if (!m_BackendId.IsUndefined())
+
573  {
+
574  capability = GetCapability("NonConstWeights", m_BackendId);
+
575  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
+
576  {
+
577  if (!weights.IsConstant())
+
578  {
+
579  if (reasonIfUnsupported.has_value())
+
580  {
+
581  reasonIfUnsupported.value() =
+
582  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
+
583  "DilatedDepthwiseConvolution2d weights are set as dynamic (non constant). ";
+
584  }
+
585  return false;
+
586  }
+
587  if (descriptor.m_BiasEnabled && !biasesVal.IsConstant())
+
588  {
+
589  if (reasonIfUnsupported.has_value())
+
590  {
+
591  reasonIfUnsupported.value() =
+
592  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
+
593  "DilatedDepthwiseConvolution2d biases are set as dynamic (non constant). ";
+
594  }
+
595  return false;
+
596  }
+
597  // At the first stage we will only print a warning. this is to give
+
598  // backend developers a chance to adopt and read weights from input slots.
+
599  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
+
600  "If you are a backend developer please find more information in our "
+
601  "doxygen documentation on github https://github.com/ARM-software/armnn "
+
602  "under the keyword 'ConstTensorsAsInputs'.";
+
603  }
+
604  }
+
605 
+
606  return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
+
607  infos,
+
608  descriptor,
609  EmptyOptional(),
-
610  reasonIfUnsupported);
-
611 }
-
612 
- -
614  const TensorInfo &input1,
-
615  const TensorInfo &output,
-
616  const ElementwiseBinaryDescriptor &descriptor,
-
617  Optional<std::string &> reasonIfUnsupported)
+
610  EmptyOptional(),
+
611  reasonIfUnsupported);
+
612 }
+
613 
+ +
615  const TensorInfo& input1,
+
616  const TensorInfo& output,
+
617  Optional<std::string&> reasonIfUnsupported)
618 {
619  TensorInfos infos{input0, input1, output};
620 
-
621  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseBinary,
+
621  return m_LayerSupport->IsLayerSupported(LayerType::Division,
622  infos,
-
623  descriptor,
+
623  BaseDescriptor(),
624  EmptyOptional(),
625  EmptyOptional(),
626  reasonIfUnsupported);
627 }
628 
- -
630  const TensorInfo& output,
-
631  const ElementwiseUnaryDescriptor& descriptor,
-
632  Optional<std::string&> reasonIfUnsupported)
-
633 {
-
634  TensorInfos infos{input, output};
-
635 
-
636  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
-
637  infos,
-
638  descriptor,
-
639  EmptyOptional(),
+ +
630  const TensorInfo &input1,
+
631  const TensorInfo &output,
+
632  const ElementwiseBinaryDescriptor &descriptor,
+
633  Optional<std::string &> reasonIfUnsupported)
+
634 {
+
635  TensorInfos infos{input0, input1, output};
+
636 
+
637  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseBinary,
+
638  infos,
+
639  descriptor,
640  EmptyOptional(),
-
641  reasonIfUnsupported);
-
642 }
-
643 
- -
645  const FakeQuantizationDescriptor& descriptor,
-
646  Optional<std::string&> reasonIfUnsupported)
-
647 {
-
648  TensorInfos infos{input};
-
649 
-
650  return m_LayerSupport->IsLayerSupported(LayerType::FakeQuantization,
-
651  infos,
-
652  descriptor,
-
653  EmptyOptional(),
-
654  EmptyOptional(),
-
655  reasonIfUnsupported);
-
656 }
-
657 
- -
659  const TensorInfo& output,
-
660  const FillDescriptor& descriptor,
-
661  Optional<std::string&> reasonIfUnsupported)
-
662 {
-
663  TensorInfos infos{input, output};
-
664 
-
665  return m_LayerSupport->IsLayerSupported(LayerType::Fill,
-
666  infos,
-
667  descriptor,
-
668  EmptyOptional(),
+
641  EmptyOptional(),
+
642  reasonIfUnsupported);
+
643 }
+
644 
+ +
646  const TensorInfo& output,
+
647  const ElementwiseUnaryDescriptor& descriptor,
+
648  Optional<std::string&> reasonIfUnsupported)
+
649 {
+
650  TensorInfos infos{input, output};
+
651 
+
652  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
+
653  infos,
+
654  descriptor,
+
655  EmptyOptional(),
+
656  EmptyOptional(),
+
657  reasonIfUnsupported);
+
658 }
+
659 
+ +
661  const FakeQuantizationDescriptor& descriptor,
+
662  Optional<std::string&> reasonIfUnsupported)
+
663 {
+
664  TensorInfos infos{input};
+
665 
+
666  return m_LayerSupport->IsLayerSupported(LayerType::FakeQuantization,
+
667  infos,
+
668  descriptor,
669  EmptyOptional(),
-
670  reasonIfUnsupported);
-
671 }
-
672 
- -
674  const TensorInfo& output,
-
675  Optional<std::string&> reasonIfUnsupported)
-
676 {
-
677  TensorInfos infos{input, output};
-
678 
-
679  return m_LayerSupport->IsLayerSupported(LayerType::Floor,
-
680  infos,
-
681  BaseDescriptor(),
-
682  EmptyOptional(),
-
683  EmptyOptional(),
-
684  reasonIfUnsupported);
-
685 }
-
686 
- -
688  const TensorInfo& output,
-
689  const TensorInfo& weights,
-
690  const TensorInfo& biases,
-
691  const FullyConnectedDescriptor& descriptor,
-
692  Optional<std::string&> reasonIfUnsupported)
-
693 {
-
694  TensorInfos infos{input, output, weights, biases};
-
695 
- -
697  if (!m_BackendId.IsUndefined())
-
698  {
-
699  capability = GetCapability("NonConstWeights", m_BackendId);
-
700  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
-
701  {
-
702  if (!descriptor.m_ConstantWeights)
-
703  {
-
704  if (reasonIfUnsupported.has_value())
-
705  {
-
706  reasonIfUnsupported.value() =
-
707  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
-
708  "FullyConnected descriptor indicates that weights are dynamic (non constant). ";
-
709  }
-
710  return false;
-
711  }
-
712  if (!weights.IsConstant())
-
713  {
-
714  if (reasonIfUnsupported.has_value())
-
715  {
-
716  reasonIfUnsupported.value() =
-
717  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
-
718  "FullyConnected weights are set as dynamic (non constant). ";
-
719  }
-
720 
-
721  return false;
-
722  }
-
723  if (descriptor.m_BiasEnabled && !biases.IsConstant())
-
724  {
-
725  if (reasonIfUnsupported.has_value())
-
726  {
-
727  reasonIfUnsupported.value() =
-
728  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
-
729  "FullyConnected biases are set as dynamic (non constant). ";
-
730  }
-
731  return false;
-
732  }
-
733 
-
734  // At the first stage we will only print a warning. this is to give
-
735  // backend developers a chance to adopt and read weights from input slots.
-
736  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
-
737  "If you are a backend developer please find more information in our "
-
738  "doxygen documentation on github https://github.com/ARM-software/armnn "
-
739  "under the keyword 'ConstTensorsAsInputs'.";
-
740  }
-
741  }
-
742 
-
743  return m_LayerSupport->IsLayerSupported(LayerType::FullyConnected,
-
744  infos,
-
745  descriptor,
-
746  EmptyOptional(),
-
747  EmptyOptional(),
-
748  reasonIfUnsupported);
-
749 }
-
750 
- -
752  const TensorInfo& input1,
-
753  const TensorInfo& output,
-
754  const GatherDescriptor& descriptor,
-
755  Optional<std::string&> reasonIfUnsupported)
-
756 {
-
757  TensorInfos infos{input0, input1, output};
+
670  EmptyOptional(),
+
671  reasonIfUnsupported);
+
672 }
+
673 
+ +
675  const TensorInfo& output,
+
676  const FillDescriptor& descriptor,
+
677  Optional<std::string&> reasonIfUnsupported)
+
678 {
+
679  TensorInfos infos{input, output};
+
680 
+
681  return m_LayerSupport->IsLayerSupported(LayerType::Fill,
+
682  infos,
+
683  descriptor,
+
684  EmptyOptional(),
+
685  EmptyOptional(),
+
686  reasonIfUnsupported);
+
687 }
+
688 
+ +
690  const TensorInfo& output,
+
691  Optional<std::string&> reasonIfUnsupported)
+
692 {
+
693  TensorInfos infos{input, output};
+
694 
+
695  return m_LayerSupport->IsLayerSupported(LayerType::Floor,
+
696  infos,
+
697  BaseDescriptor(),
+
698  EmptyOptional(),
+
699  EmptyOptional(),
+
700  reasonIfUnsupported);
+
701 }
+
702 
+ +
704  const TensorInfo& output,
+
705  const TensorInfo& weights,
+
706  const TensorInfo& biases,
+
707  const FullyConnectedDescriptor& descriptor,
+
708  Optional<std::string&> reasonIfUnsupported)
+
709 {
+
710  TensorInfos infos{input, output, weights, biases};
+
711 
+ +
713  if (!m_BackendId.IsUndefined())
+
714  {
+
715  capability = GetCapability("NonConstWeights", m_BackendId);
+
716  if (!capability.has_value() || capability.value().GetValue().AsBool() == false)
+
717  {
+
718  if (!descriptor.m_ConstantWeights)
+
719  {
+
720  if (reasonIfUnsupported.has_value())
+
721  {
+
722  reasonIfUnsupported.value() =
+
723  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
+
724  "FullyConnected descriptor indicates that weights are dynamic (non constant). ";
+
725  }
+
726  return false;
+
727  }
+
728  if (!weights.IsConstant())
+
729  {
+
730  if (reasonIfUnsupported.has_value())
+
731  {
+
732  reasonIfUnsupported.value() =
+
733  "Backend is not capable of supporting dynamic weights (NonConstWeights) and "
+
734  "FullyConnected weights are set as dynamic (non constant). ";
+
735  }
+
736 
+
737  return false;
+
738  }
+
739  if (descriptor.m_BiasEnabled && !biases.IsConstant())
+
740  {
+
741  if (reasonIfUnsupported.has_value())
+
742  {
+
743  reasonIfUnsupported.value() =
+
744  "Backend is not capable of supporting dynamic biases (NonConstWeights) and "
+
745  "FullyConnected biases are set as dynamic (non constant). ";
+
746  }
+
747  return false;
+
748  }
+
749 
+
750  // At the first stage we will only print a warning. this is to give
+
751  // backend developers a chance to adopt and read weights from input slots.
+
752  ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
+
753  "If you are a backend developer please find more information in our "
+
754  "doxygen documentation on github https://github.com/ARM-software/armnn "
+
755  "under the keyword 'ConstTensorsAsInputs'.";
+
756  }
+
757  }
758 
-
759  return m_LayerSupport->IsLayerSupported(LayerType::Gather,
+
759  return m_LayerSupport->IsLayerSupported(LayerType::FullyConnected,
760  infos,
761  descriptor,
762  EmptyOptional(),
@@ -862,731 +862,771 @@ $(document).ready(function(){initNavTree('_backend_helper_8cpp_source.html','');
764  reasonIfUnsupported);
765 }
766 
- -
768  const TensorInfo& input1,
-
769  const TensorInfo& output,
-
770  Optional<std::string&> reasonIfUnsupported)
+
767 bool LayerSupportHandle::IsFusedSupported(const std::vector<std::reference_wrapper<TensorInfo>>& inputs,
+
768  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+
769  const FusedDescriptor& descriptor,
+
770  Optional<std::string&> reasonIfUnsupported)
771 {
-
772  TensorInfos infos{input0, input1, output};
-
773 
-
774  return m_LayerSupport->IsLayerSupported(LayerType::GatherNd,
-
775  infos,
-
776  BaseDescriptor(),
-
777  EmptyOptional(),
-
778  EmptyOptional(),
-
779  reasonIfUnsupported);
-
780 }
-
781 
- -
783  Optional<std::string&> reasonIfUnsupported)
-
784 {
-
785  TensorInfos infos{input};
-
786 
-
787  return m_LayerSupport->IsLayerSupported(LayerType::Input,
-
788  infos,
-
789  BaseDescriptor(),
-
790  EmptyOptional(),
-
791  EmptyOptional(),
-
792  reasonIfUnsupported);
-
793 }
-
794 
- -
796  const TensorInfo& input,
-
797  const TensorInfo& output,
-
798  const InstanceNormalizationDescriptor& descriptor,
-
799  Optional<std::string&> reasonIfUnsupported)
-
800 {
-
801  TensorInfos infos{input, output};
-
802 
-
803  return m_LayerSupport->IsLayerSupported(LayerType::InstanceNormalization,
-
804  infos,
-
805  descriptor,
-
806  EmptyOptional(),
-
807  EmptyOptional(),
-
808  reasonIfUnsupported);
-
809 }
-
810 
- -
812  const TensorInfo& output,
-
813  const L2NormalizationDescriptor& descriptor,
-
814  Optional<std::string&> reasonIfUnsupported)
-
815 {
-
816  TensorInfos infos{input, output};
-
817 
-
818  return m_LayerSupport->IsLayerSupported(LayerType::L2Normalization,
-
819  infos,
-
820  descriptor,
-
821  EmptyOptional(),
-
822  EmptyOptional(),
-
823  reasonIfUnsupported);
-
824 }
-
825 
- -
827  const TensorInfo& input1,
-
828  const TensorInfo& output,
-
829  const LogicalBinaryDescriptor& descriptor,
-
830  Optional<std::string&> reasonIfUnsupported)
-
831 {
-
832  TensorInfos infos{input0, input1, output};
-
833 
-
834  return m_LayerSupport->IsLayerSupported(LayerType::LogicalBinary,
-
835  infos,
-
836  descriptor,
-
837  EmptyOptional(),
-
838  EmptyOptional(),
-
839  reasonIfUnsupported);
-
840 }
-
841 
- -
843  const TensorInfo& output,
-
844  const ElementwiseUnaryDescriptor& descriptor,
-
845  Optional<std::string&> reasonIfUnsupported)
-
846 {
-
847  TensorInfos infos{input, output};
-
848 
-
849  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
-
850  infos,
-
851  descriptor,
-
852  EmptyOptional(),
-
853  EmptyOptional(),
-
854  reasonIfUnsupported);
-
855 }
-
856 
- -
858  const TensorInfo& output,
-
859  const LogSoftmaxDescriptor& descriptor,
-
860  Optional<std::string&> reasonIfUnsupported)
-
861 {
-
862  TensorInfos infos{input, output};
-
863 
-
864  return m_LayerSupport->IsLayerSupported(LayerType::LogSoftmax,
-
865  infos,
-
866  descriptor,
-
867  EmptyOptional(),
-
868  EmptyOptional(),
-
869  reasonIfUnsupported);
-
870 }
-
871 
- -
873  const TensorInfo& outputStateIn,
-
874  const TensorInfo& cellStateIn,
-
875  const TensorInfo& scratchBuffer,
-
876  const TensorInfo& outputStateOut,
-
877  const TensorInfo& cellStateOut,
-
878  const TensorInfo& output,
-
879  const LstmDescriptor& descriptor,
-
880  const LstmInputParamsInfo& paramsInfo,
-
881  Optional<std::string&> reasonIfUnsupported)
-
882 {
-
883  TensorInfos infos{input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut, output};
-
884 
-
885  return m_LayerSupport->IsLayerSupported(LayerType::Lstm,
-
886  infos,
-
887  descriptor,
-
888  paramsInfo,
-
889  EmptyOptional(),
-
890  reasonIfUnsupported);
-
891 }
-
892 
- -
894  const TensorInfo& input1,
-
895  const TensorInfo& output,
-
896  Optional<std::string&> reasonIfUnsupported)
-
897 {
-
898  TensorInfos infos{input0, input1, output};
-
899 
-
900  return m_LayerSupport->IsLayerSupported(LayerType::Maximum,
-
901  infos,
-
902  BaseDescriptor(),
-
903  EmptyOptional(),
-
904  EmptyOptional(),
-
905  reasonIfUnsupported);
-
906 }
-
907 
- -
909  const TensorInfo& output,
-
910  const MeanDescriptor& descriptor,
-
911  Optional<std::string&> reasonIfUnsupported)
-
912 {
-
913  TensorInfos infos{input, output};
-
914 
-
915  return m_LayerSupport->IsLayerSupported(LayerType::Mean,
-
916  infos,
-
917  descriptor,
-
918  EmptyOptional(),
-
919  EmptyOptional(),
-
920  reasonIfUnsupported);
-
921 }
-
922 
- -
924  const TensorInfo& output,
-
925  Optional<std::string&> reasonIfUnsupported)
-
926 {
-
927  TensorInfos infos{input, output};
-
928 
-
929  return m_LayerSupport->IsLayerSupported(LayerType::MemCopy,
-
930  infos,
-
931  BaseDescriptor(),
-
932  EmptyOptional(),
-
933  EmptyOptional(),
-
934  reasonIfUnsupported);
-
935 }
-
936 
- -
938  const TensorInfo& output,
-
939  Optional<std::string&> reasonIfUnsupported)
-
940 {
-
941  TensorInfos infos{input, output};
-
942 
-
943  return m_LayerSupport->IsLayerSupported(LayerType::MemImport,
-
944  infos,
-
945  BaseDescriptor(),
-
946  EmptyOptional(),
-
947  EmptyOptional(),
-
948  reasonIfUnsupported);
-
949 }
-
950 
- -
952  const TensorInfo& input1,
-
953  const TensorInfo& output,
-
954  Optional<std::string&> reasonIfUnsupported)
-
955 {
-
956  TensorInfos infos{input0, input1, output};
-
957 
-
958  return m_LayerSupport->IsLayerSupported(LayerType::Merge,
-
959  infos,
-
960  BaseDescriptor(),
-
961  EmptyOptional(),
-
962  EmptyOptional(),
-
963  reasonIfUnsupported);
-
964 }
-
965 
- -
967  const TensorInfo& input1,
-
968  const TensorInfo& output,
-
969  Optional<std::string&> reasonIfUnsupported)
-
970 {
-
971  TensorInfos infos{input0, input1, output};
-
972 
-
973  return m_LayerSupport->IsLayerSupported(LayerType::Minimum,
-
974  infos,
-
975  BaseDescriptor(),
-
976  EmptyOptional(),
-
977  EmptyOptional(),
-
978  reasonIfUnsupported);
-
979 }
-
980 
- -
982  const TensorInfo& input1,
-
983  const TensorInfo& output,
-
984  Optional<std::string&> reasonIfUnsupported)
-
985 {
-
986  TensorInfos infos{input0, input1, output};
-
987 
-
988  return m_LayerSupport->IsLayerSupported(LayerType::Multiplication,
-
989  infos,
-
990  BaseDescriptor(),
-
991  EmptyOptional(),
-
992  EmptyOptional(),
-
993  reasonIfUnsupported);
-
994 }
-
995 
- -
997  const TensorInfo& output,
-
998  const NormalizationDescriptor& descriptor,
-
999  Optional<std::string&> reasonIfUnsupported)
-
1000 {
-
1001  TensorInfos infos{input, output};
-
1002 
-
1003  return m_LayerSupport->IsLayerSupported(LayerType::Normalization,
-
1004  infos,
-
1005  descriptor,
-
1006  EmptyOptional(),
-
1007  EmptyOptional(),
-
1008  reasonIfUnsupported);
-
1009 }
-
1010 
- -
1012  Optional<std::string&> reasonIfUnsupported)
-
1013 {
-
1014  TensorInfos infos{output};
-
1015 
-
1016  return m_LayerSupport->IsLayerSupported(LayerType::Output,
-
1017  infos,
-
1018  BaseDescriptor(),
-
1019  EmptyOptional(),
-
1020  EmptyOptional(),
-
1021  reasonIfUnsupported);
-
1022 }
-
1023 
- -
1025  const TensorInfo& output,
-
1026  const PadDescriptor& descriptor,
-
1027  Optional<std::string&> reasonIfUnsupported)
-
1028 {
-
1029  TensorInfos infos{input, output};
-
1030 
-
1031  return m_LayerSupport->IsLayerSupported(LayerType::Pad,
-
1032  infos,
-
1033  descriptor,
-
1034  EmptyOptional(),
-
1035  EmptyOptional(),
-
1036  reasonIfUnsupported);
-
1037 }
-
1038 
- -
1040  const TensorInfo& output,
-
1041  const PermuteDescriptor& descriptor,
-
1042  Optional<std::string&> reasonIfUnsupported)
-
1043 {
-
1044  TensorInfos infos{input, output};
-
1045 
-
1046  return m_LayerSupport->IsLayerSupported(LayerType::Permute,
-
1047  infos,
-
1048  descriptor,
-
1049  EmptyOptional(),
-
1050  EmptyOptional(),
-
1051  reasonIfUnsupported);
-
1052 }
-
1053 
- -
1055  const TensorInfo& output,
-
1056  const Pooling2dDescriptor& descriptor,
-
1057  Optional<std::string&> reasonIfUnsupported)
-
1058 {
-
1059  TensorInfos infos{input, output};
-
1060 
-
1061  return m_LayerSupport->IsLayerSupported(LayerType::Pooling2d,
-
1062  infos,
-
1063  descriptor,
-
1064  EmptyOptional(),
-
1065  EmptyOptional(),
-
1066  reasonIfUnsupported);
-
1067 }
-
1068 
- -
1070  const TensorInfo& output,
-
1071  const Pooling3dDescriptor& descriptor,
-
1072  Optional<std::string&> reasonIfUnsupported)
-
1073 {
-
1074  TensorInfos infos{input, output};
-
1075 
-
1076  return m_LayerSupport->IsLayerSupported(LayerType::Pooling3d,
-
1077  infos,
-
1078  descriptor,
-
1079  EmptyOptional(),
-
1080  EmptyOptional(),
-
1081  reasonIfUnsupported);
-
1082 }
-
1083 
- -
1085  const PreCompiledDescriptor& descriptor,
-
1086  Optional<std::string&> reasonIfUnsupported)
-
1087 {
-
1088  TensorInfos infos{input};
-
1089 
-
1090  return m_LayerSupport->IsLayerSupported(LayerType::PreCompiled,
-
1091  infos,
-
1092  descriptor,
-
1093  EmptyOptional(),
-
1094  EmptyOptional(),
-
1095  reasonIfUnsupported);
-
1096 }
-
1097 
- -
1099  const TensorInfo& alpha,
-
1100  const TensorInfo& output,
-
1101  Optional<std::string&> reasonIfUnsupported)
-
1102 {
-
1103  TensorInfos infos{input, alpha, output};
-
1104 
-
1105  return m_LayerSupport->IsLayerSupported(LayerType::Prelu,
-
1106  infos,
-
1107  BaseDescriptor(),
-
1108  EmptyOptional(),
-
1109  EmptyOptional(),
-
1110  reasonIfUnsupported);
-
1111 }
-
1112 
- -
1114  const TensorInfo& output,
-
1115  Optional<std::string&> reasonIfUnsupported)
-
1116 {
-
1117  TensorInfos infos{input, output};
-
1118 
-
1119  return m_LayerSupport->IsLayerSupported(LayerType::Quantize,
-
1120  infos,
-
1121  BaseDescriptor(),
-
1122  EmptyOptional(),
-
1123  EmptyOptional(),
-
1124  reasonIfUnsupported);
-
1125 }
-
1126 
- -
1128  const TensorInfo& previousOutputIn,
-
1129  const TensorInfo& previousCellStateIn,
-
1130  const TensorInfo& outputStateOut,
-
1131  const TensorInfo& cellStateOut,
-
1132  const TensorInfo& output,
-
1133  const QLstmDescriptor& descriptor,
-
1134  const LstmInputParamsInfo& paramsInfo,
-
1135  Optional<std::string&> reasonIfUnsupported)
-
1136 {
-
1137  TensorInfos infos{input, previousOutputIn, previousCellStateIn, outputStateOut, cellStateOut, output};
-
1138 
-
1139  return m_LayerSupport->IsLayerSupported(LayerType::QLstm,
-
1140  infos,
-
1141  descriptor,
-
1142  paramsInfo,
-
1143  EmptyOptional(),
-
1144  reasonIfUnsupported);
-
1145 }
-
1146 
- -
1148  const TensorInfo& previousCellStateIn,
-
1149  const TensorInfo& previousOutputIn,
-
1150  const TensorInfo& cellStateOut,
-
1151  const TensorInfo& output,
-
1152  const QuantizedLstmInputParamsInfo& paramsInfo,
-
1153  Optional<std::string&> reasonIfUnsupported)
-
1154 {
-
1155  TensorInfos infos{input, previousCellStateIn, previousOutputIn, cellStateOut, output};
-
1156 
-
1157  return m_LayerSupport->IsLayerSupported(LayerType::QuantizedLstm,
-
1158  infos,
-
1159  BaseDescriptor(),
-
1160  EmptyOptional(),
-
1161  paramsInfo,
-
1162  reasonIfUnsupported);
-
1163 }
-
1164 
- -
1166  const TensorInfo& output,
-
1167  Optional<std::string&> reasonIfUnsupported)
-
1168 {
-
1169  TensorInfos infos{input, output};
-
1170 
-
1171  return m_LayerSupport->IsLayerSupported(LayerType::Rank,
-
1172  infos,
-
1173  BaseDescriptor(),
-
1174  EmptyOptional(),
-
1175  EmptyOptional(),
-
1176  reasonIfUnsupported);
-
1177 }
+
772  TensorInfos infos;
+
773  infos.reserve(inputs.size() + outputs.size());
+
774  for (TensorInfo inInfo : inputs)
+
775  {
+
776  infos.emplace_back(inInfo);
+
777  }
+
778  for (TensorInfo outInfo : outputs)
+
779  {
+
780  infos.emplace_back(outInfo);
+
781  }
+
782 
+
783  return m_LayerSupport->IsLayerSupported(LayerType::Fused,
+
784  infos,
+
785  descriptor,
+
786  EmptyOptional(),
+
787  EmptyOptional(),
+
788  reasonIfUnsupported);
+
789 }
+
790 
+ +
792  const TensorInfo& input1,
+
793  const TensorInfo& output,
+
794  const GatherDescriptor& descriptor,
+
795  Optional<std::string&> reasonIfUnsupported)
+
796 {
+
797  TensorInfos infos{input0, input1, output};
+
798 
+
799  return m_LayerSupport->IsLayerSupported(LayerType::Gather,
+
800  infos,
+
801  descriptor,
+
802  EmptyOptional(),
+
803  EmptyOptional(),
+
804  reasonIfUnsupported);
+
805 }
+
806 
+ +
808  const TensorInfo& input1,
+
809  const TensorInfo& output,
+
810  Optional<std::string&> reasonIfUnsupported)
+
811 {
+
812  TensorInfos infos{input0, input1, output};
+
813 
+
814  return m_LayerSupport->IsLayerSupported(LayerType::GatherNd,
+
815  infos,
+
816  BaseDescriptor(),
+
817  EmptyOptional(),
+
818  EmptyOptional(),
+
819  reasonIfUnsupported);
+
820 }
+
821 
+ +
823  Optional<std::string&> reasonIfUnsupported)
+
824 {
+
825  TensorInfos infos{input};
+
826 
+
827  return m_LayerSupport->IsLayerSupported(LayerType::Input,
+
828  infos,
+
829  BaseDescriptor(),
+
830  EmptyOptional(),
+
831  EmptyOptional(),
+
832  reasonIfUnsupported);
+
833 }
+
834 
+ +
836  const TensorInfo& input,
+
837  const TensorInfo& output,
+
838  const InstanceNormalizationDescriptor& descriptor,
+
839  Optional<std::string&> reasonIfUnsupported)
+
840 {
+
841  TensorInfos infos{input, output};
+
842 
+
843  return m_LayerSupport->IsLayerSupported(LayerType::InstanceNormalization,
+
844  infos,
+
845  descriptor,
+
846  EmptyOptional(),
+
847  EmptyOptional(),
+
848  reasonIfUnsupported);
+
849 }
+
850 
+ +
852  const TensorInfo& output,
+
853  const L2NormalizationDescriptor& descriptor,
+
854  Optional<std::string&> reasonIfUnsupported)
+
855 {
+
856  TensorInfos infos{input, output};
+
857 
+
858  return m_LayerSupport->IsLayerSupported(LayerType::L2Normalization,
+
859  infos,
+
860  descriptor,
+
861  EmptyOptional(),
+
862  EmptyOptional(),
+
863  reasonIfUnsupported);
+
864 }
+
865 
+ +
867  const TensorInfo& input1,
+
868  const TensorInfo& output,
+
869  const LogicalBinaryDescriptor& descriptor,
+
870  Optional<std::string&> reasonIfUnsupported)
+
871 {
+
872  TensorInfos infos{input0, input1, output};
+
873 
+
874  return m_LayerSupport->IsLayerSupported(LayerType::LogicalBinary,
+
875  infos,
+
876  descriptor,
+
877  EmptyOptional(),
+
878  EmptyOptional(),
+
879  reasonIfUnsupported);
+
880 }
+
881 
+ +
883  const TensorInfo& output,
+
884  const ElementwiseUnaryDescriptor& descriptor,
+
885  Optional<std::string&> reasonIfUnsupported)
+
886 {
+
887  TensorInfos infos{input, output};
+
888 
+
889  return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
+
890  infos,
+
891  descriptor,
+
892  EmptyOptional(),
+
893  EmptyOptional(),
+
894  reasonIfUnsupported);
+
895 }
+
896 
+ +
898  const TensorInfo& output,
+
899  const LogSoftmaxDescriptor& descriptor,
+
900  Optional<std::string&> reasonIfUnsupported)
+
901 {
+
902  TensorInfos infos{input, output};
+
903 
+
904  return m_LayerSupport->IsLayerSupported(LayerType::LogSoftmax,
+
905  infos,
+
906  descriptor,
+
907  EmptyOptional(),
+
908  EmptyOptional(),
+
909  reasonIfUnsupported);
+
910 }
+
911 
+ +
913  const TensorInfo& outputStateIn,
+
914  const TensorInfo& cellStateIn,
+
915  const TensorInfo& scratchBuffer,
+
916  const TensorInfo& outputStateOut,
+
917  const TensorInfo& cellStateOut,
+
918  const TensorInfo& output,
+
919  const LstmDescriptor& descriptor,
+
920  const LstmInputParamsInfo& paramsInfo,
+
921  Optional<std::string&> reasonIfUnsupported)
+
922 {
+
923  TensorInfos infos{input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut, output};
+
924 
+
925  return m_LayerSupport->IsLayerSupported(LayerType::Lstm,
+
926  infos,
+
927  descriptor,
+
928  paramsInfo,
+
929  EmptyOptional(),
+
930  reasonIfUnsupported);
+
931 }
+
932 
+ +
934  const TensorInfo& input1,
+
935  const TensorInfo& output,
+
936  Optional<std::string&> reasonIfUnsupported)
+
937 {
+
938  TensorInfos infos{input0, input1, output};
+
939 
+
940  return m_LayerSupport->IsLayerSupported(LayerType::Maximum,
+
941  infos,
+
942  BaseDescriptor(),
+
943  EmptyOptional(),
+
944  EmptyOptional(),
+
945  reasonIfUnsupported);
+
946 }
+
947 
+ +
949  const TensorInfo& output,
+
950  const MeanDescriptor& descriptor,
+
951  Optional<std::string&> reasonIfUnsupported)
+
952 {
+
953  TensorInfos infos{input, output};
+
954 
+
955  return m_LayerSupport->IsLayerSupported(LayerType::Mean,
+
956  infos,
+
957  descriptor,
+
958  EmptyOptional(),
+
959  EmptyOptional(),
+
960  reasonIfUnsupported);
+
961 }
+
962 
+ +
964  const TensorInfo& output,
+
965  Optional<std::string&> reasonIfUnsupported)
+
966 {
+
967  TensorInfos infos{input, output};
+
968 
+
969  return m_LayerSupport->IsLayerSupported(LayerType::MemCopy,
+
970  infos,
+
971  BaseDescriptor(),
+
972  EmptyOptional(),
+
973  EmptyOptional(),
+
974  reasonIfUnsupported);
+
975 }
+
976 
+ +
978  const TensorInfo& output,
+
979  Optional<std::string&> reasonIfUnsupported)
+
980 {
+
981  TensorInfos infos{input, output};
+
982 
+
983  return m_LayerSupport->IsLayerSupported(LayerType::MemImport,
+
984  infos,
+
985  BaseDescriptor(),
+
986  EmptyOptional(),
+
987  EmptyOptional(),
+
988  reasonIfUnsupported);
+
989 }
+
990 
+ +
992  const TensorInfo& input1,
+
993  const TensorInfo& output,
+
994  Optional<std::string&> reasonIfUnsupported)
+
995 {
+
996  TensorInfos infos{input0, input1, output};
+
997 
+
998  return m_LayerSupport->IsLayerSupported(LayerType::Merge,
+
999  infos,
+
1000  BaseDescriptor(),
+
1001  EmptyOptional(),
+
1002  EmptyOptional(),
+
1003  reasonIfUnsupported);
+
1004 }
+
1005 
+ +
1007  const TensorInfo& input1,
+
1008  const TensorInfo& output,
+
1009  Optional<std::string&> reasonIfUnsupported)
+
1010 {
+
1011  TensorInfos infos{input0, input1, output};
+
1012 
+
1013  return m_LayerSupport->IsLayerSupported(LayerType::Minimum,
+
1014  infos,
+
1015  BaseDescriptor(),
+
1016  EmptyOptional(),
+
1017  EmptyOptional(),
+
1018  reasonIfUnsupported);
+
1019 }
+
1020 
+ +
1022  const TensorInfo& input1,
+
1023  const TensorInfo& output,
+
1024  Optional<std::string&> reasonIfUnsupported)
+
1025 {
+
1026  TensorInfos infos{input0, input1, output};
+
1027 
+
1028  return m_LayerSupport->IsLayerSupported(LayerType::Multiplication,
+
1029  infos,
+
1030  BaseDescriptor(),
+
1031  EmptyOptional(),
+
1032  EmptyOptional(),
+
1033  reasonIfUnsupported);
+
1034 }
+
1035 
+ +
1037  const TensorInfo& output,
+
1038  const NormalizationDescriptor& descriptor,
+
1039  Optional<std::string&> reasonIfUnsupported)
+
1040 {
+
1041  TensorInfos infos{input, output};
+
1042 
+
1043  return m_LayerSupport->IsLayerSupported(LayerType::Normalization,
+
1044  infos,
+
1045  descriptor,
+
1046  EmptyOptional(),
+
1047  EmptyOptional(),
+
1048  reasonIfUnsupported);
+
1049 }
+
1050 
+ +
1052  Optional<std::string&> reasonIfUnsupported)
+
1053 {
+
1054  TensorInfos infos{output};
+
1055 
+
1056  return m_LayerSupport->IsLayerSupported(LayerType::Output,
+
1057  infos,
+
1058  BaseDescriptor(),
+
1059  EmptyOptional(),
+
1060  EmptyOptional(),
+
1061  reasonIfUnsupported);
+
1062 }
+
1063 
+ +
1065  const TensorInfo& output,
+
1066  const PadDescriptor& descriptor,
+
1067  Optional<std::string&> reasonIfUnsupported)
+
1068 {
+
1069  TensorInfos infos{input, output};
+
1070 
+
1071  return m_LayerSupport->IsLayerSupported(LayerType::Pad,
+
1072  infos,
+
1073  descriptor,
+
1074  EmptyOptional(),
+
1075  EmptyOptional(),
+
1076  reasonIfUnsupported);
+
1077 }
+
1078 
+ +
1080  const TensorInfo& output,
+
1081  const PermuteDescriptor& descriptor,
+
1082  Optional<std::string&> reasonIfUnsupported)
+
1083 {
+
1084  TensorInfos infos{input, output};
+
1085 
+
1086  return m_LayerSupport->IsLayerSupported(LayerType::Permute,
+
1087  infos,
+
1088  descriptor,
+
1089  EmptyOptional(),
+
1090  EmptyOptional(),
+
1091  reasonIfUnsupported);
+
1092 }
+
1093 
+ +
1095  const TensorInfo& output,
+
1096  const Pooling2dDescriptor& descriptor,
+
1097  Optional<std::string&> reasonIfUnsupported)
+
1098 {
+
1099  TensorInfos infos{input, output};
+
1100 
+
1101  return m_LayerSupport->IsLayerSupported(LayerType::Pooling2d,
+
1102  infos,
+
1103  descriptor,
+
1104  EmptyOptional(),
+
1105  EmptyOptional(),
+
1106  reasonIfUnsupported);
+
1107 }
+
1108 
+ +
1110  const TensorInfo& output,
+
1111  const Pooling3dDescriptor& descriptor,
+
1112  Optional<std::string&> reasonIfUnsupported)
+
1113 {
+
1114  TensorInfos infos{input, output};
+
1115 
+
1116  return m_LayerSupport->IsLayerSupported(LayerType::Pooling3d,
+
1117  infos,
+
1118  descriptor,
+
1119  EmptyOptional(),
+
1120  EmptyOptional(),
+
1121  reasonIfUnsupported);
+
1122 }
+
1123 
+ +
1125  const PreCompiledDescriptor& descriptor,
+
1126  Optional<std::string&> reasonIfUnsupported)
+
1127 {
+
1128  TensorInfos infos{input};
+
1129 
+
1130  return m_LayerSupport->IsLayerSupported(LayerType::PreCompiled,
+
1131  infos,
+
1132  descriptor,
+
1133  EmptyOptional(),
+
1134  EmptyOptional(),
+
1135  reasonIfUnsupported);
+
1136 }
+
1137 
+ +
1139  const TensorInfo& alpha,
+
1140  const TensorInfo& output,
+
1141  Optional<std::string&> reasonIfUnsupported)
+
1142 {
+
1143  TensorInfos infos{input, alpha, output};
+
1144 
+
1145  return m_LayerSupport->IsLayerSupported(LayerType::Prelu,
+
1146  infos,
+
1147  BaseDescriptor(),
+
1148  EmptyOptional(),
+
1149  EmptyOptional(),
+
1150  reasonIfUnsupported);
+
1151 }
+
1152 
+ +
1154  const TensorInfo& output,
+
1155  Optional<std::string&> reasonIfUnsupported)
+
1156 {
+
1157  TensorInfos infos{input, output};
+
1158 
+
1159  return m_LayerSupport->IsLayerSupported(LayerType::Quantize,
+
1160  infos,
+
1161  BaseDescriptor(),
+
1162  EmptyOptional(),
+
1163  EmptyOptional(),
+
1164  reasonIfUnsupported);
+
1165 }
+
1166 
+ +
1168  const TensorInfo& previousOutputIn,
+
1169  const TensorInfo& previousCellStateIn,
+
1170  const TensorInfo& outputStateOut,
+
1171  const TensorInfo& cellStateOut,
+
1172  const TensorInfo& output,
+
1173  const QLstmDescriptor& descriptor,
+
1174  const LstmInputParamsInfo& paramsInfo,
+
1175  Optional<std::string&> reasonIfUnsupported)
+
1176 {
+
1177  TensorInfos infos{input, previousOutputIn, previousCellStateIn, outputStateOut, cellStateOut, output};
1178 
- -
1180  const TensorInfo& output,
-
1181  const ReduceDescriptor& descriptor,
-
1182  Optional<std::string&> reasonIfUnsupported)
-
1183 {
-
1184  TensorInfos infos{input, output};
-
1185 
-
1186  return m_LayerSupport->IsLayerSupported(LayerType::Reduce,
-
1187  infos,
-
1188  descriptor,
-
1189  EmptyOptional(),
-
1190  EmptyOptional(),
-
1191  reasonIfUnsupported);
-
1192 }
-
1193 
- -
1195  const TensorInfo& output,
-
1196  const ReshapeDescriptor& descriptor,
-
1197  Optional<std::string&> reasonIfUnsupported)
-
1198 {
-
1199  TensorInfos infos{input, output};
-
1200 
-
1201  return m_LayerSupport->IsLayerSupported(LayerType::Reshape,
-
1202  infos,
-
1203  descriptor,
-
1204  EmptyOptional(),
-
1205  EmptyOptional(),
-
1206  reasonIfUnsupported);
-
1207 }
-
1208 
- -
1210  const TensorInfo& output,
-
1211  const ResizeDescriptor& descriptor,
-
1212  Optional<std::string&> reasonIfUnsupported)
-
1213 {
-
1214  TensorInfos infos{input, output};
-
1215 
-
1216  return m_LayerSupport->IsLayerSupported(LayerType::Resize,
-
1217  infos,
-
1218  descriptor,
-
1219  EmptyOptional(),
-
1220  EmptyOptional(),
-
1221  reasonIfUnsupported);
-
1222 }
-
1223 
- -
1225  const armnn::TensorInfo &input1,
-
1226  const armnn::TensorInfo &output,
-
1227  Optional<std::string &> reasonIfUnsupported)
-
1228 {
-
1229  TensorInfos infos{input0, input1, output};
-
1230 
-
1231  return m_LayerSupport->IsLayerSupported(LayerType::ReverseV2,
-
1232  infos,
-
1233  BaseDescriptor(),
-
1234  EmptyOptional(),
-
1235  EmptyOptional(),
-
1236  reasonIfUnsupported);
-
1237 }
-
1238 
- -
1240  const TensorInfo& output,
-
1241  Optional<std::string&> reasonIfUnsupported)
-
1242 {
-
1243  TensorInfos infos{input, output};
-
1244 
-
1245  return m_LayerSupport->IsLayerSupported(LayerType::Shape,
-
1246  infos,
-
1247  BaseDescriptor(),
-
1248  EmptyOptional(),
-
1249  EmptyOptional(),
-
1250  reasonIfUnsupported);
-
1251 }
-
1252 
- -
1254  const TensorInfo& output,
-
1255  const SliceDescriptor& descriptor,
-
1256  Optional<std::string&> reasonIfUnsupported)
-
1257 {
-
1258  TensorInfos infos{input, output};
-
1259 
-
1260  return m_LayerSupport->IsLayerSupported(LayerType::Slice,
-
1261  infos,
-
1262  descriptor,
-
1263  EmptyOptional(),
-
1264  EmptyOptional(),
-
1265  reasonIfUnsupported);
-
1266 }
-
1267 
- -
1269  const TensorInfo& output,
-
1270  const SoftmaxDescriptor& descriptor,
-
1271  Optional<std::string&> reasonIfUnsupported)
-
1272 {
-
1273  TensorInfos infos{input, output};
-
1274 
-
1275  return m_LayerSupport->IsLayerSupported(LayerType::Softmax,
-
1276  infos,
-
1277  descriptor,
-
1278  EmptyOptional(),
-
1279  EmptyOptional(),
-
1280  reasonIfUnsupported);
-
1281 }
-
1282 
-
1283 bool LayerSupportHandle::IsSpaceToBatchNdSupported(const TensorInfo& input,
-
1284  const TensorInfo& output,
-
1285  const SpaceToBatchNdDescriptor& descriptor,
-
1286  Optional<std::string&> reasonIfUnsupported)
-
1287 {
-
1288  TensorInfos infos{input, output};
-
1289 
-
1290  return m_LayerSupport->IsLayerSupported(LayerType::SpaceToBatchNd,
-
1291  infos,
-
1292  descriptor,
-
1293  EmptyOptional(),
-
1294  EmptyOptional(),
-
1295  reasonIfUnsupported);
-
1296 }
-
1297 
-
1298 bool LayerSupportHandle::IsSpaceToDepthSupported(const TensorInfo& input,
-
1299  const TensorInfo& output,
-
1300  const SpaceToDepthDescriptor& descriptor,
-
1301  Optional<std::string&> reasonIfUnsupported)
-
1302 {
-
1303  TensorInfos infos{input, output};
-
1304 
-
1305  return m_LayerSupport->IsLayerSupported(LayerType::SpaceToDepth,
-
1306  infos,
-
1307  descriptor,
-
1308  EmptyOptional(),
-
1309  EmptyOptional(),
-
1310  reasonIfUnsupported);
-
1311 }
-
1312 
-
1313 bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
-
1314  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
-
1315  const ViewsDescriptor& descriptor,
-
1316  Optional<std::string&> reasonIfUnsupported)
-
1317 {
-
1318  TensorInfos infos{input};
-
1319  for (TensorInfo outInfo : outputs)
-
1320  {
-
1321  infos.push_back(outInfo);
-
1322  }
-
1323 
-
1324  return m_LayerSupport->IsLayerSupported(LayerType::Splitter,
-
1325  infos,
-
1326  descriptor,
-
1327  EmptyOptional(),
-
1328  EmptyOptional(),
-
1329  reasonIfUnsupported);
-
1330 }
-
1331 
-
1332 bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
-
1333  const TensorInfo& output,
-
1334  const StackDescriptor& descriptor,
-
1335  Optional<std::string&> reasonIfUnsupported)
-
1336 {
-
1337  TensorInfos infos;
-
1338  for (const TensorInfo* inputInfo : inputs)
-
1339  {
-
1340  infos.push_back(*inputInfo);
-
1341  }
-
1342  infos.push_back(output);
-
1343 
-
1344  return m_LayerSupport->IsLayerSupported(LayerType::Stack,
-
1345  infos,
-
1346  descriptor,
-
1347  EmptyOptional(),
+
1179  return m_LayerSupport->IsLayerSupported(LayerType::QLstm,
+
1180  infos,
+
1181  descriptor,
+
1182  paramsInfo,
+
1183  EmptyOptional(),
+
1184  reasonIfUnsupported);
+
1185 }
+
1186 
+
1187 bool LayerSupportHandle::IsQuantizedLstmSupported(const TensorInfo& input,
+
1188  const TensorInfo& previousCellStateIn,
+
1189  const TensorInfo& previousOutputIn,
+
1190  const TensorInfo& cellStateOut,
+
1191  const TensorInfo& output,
+
1192  const QuantizedLstmInputParamsInfo& paramsInfo,
+
1193  Optional<std::string&> reasonIfUnsupported)
+
1194 {
+
1195  TensorInfos infos{input, previousCellStateIn, previousOutputIn, cellStateOut, output};
+
1196 
+
1197  return m_LayerSupport->IsLayerSupported(LayerType::QuantizedLstm,
+
1198  infos,
+
1199  BaseDescriptor(),
+
1200  EmptyOptional(),
+
1201  paramsInfo,
+
1202  reasonIfUnsupported);
+
1203 }
+
1204 
+
1205 bool LayerSupportHandle::IsRankSupported(const TensorInfo& input,
+
1206  const TensorInfo& output,
+
1207  Optional<std::string&> reasonIfUnsupported)
+
1208 {
+
1209  TensorInfos infos{input, output};
+
1210 
+
1211  return m_LayerSupport->IsLayerSupported(LayerType::Rank,
+
1212  infos,
+
1213  BaseDescriptor(),
+
1214  EmptyOptional(),
+
1215  EmptyOptional(),
+
1216  reasonIfUnsupported);
+
1217 }
+
1218 
+
1219 bool LayerSupportHandle::IsReduceSupported(const TensorInfo& input,
+
1220  const TensorInfo& output,
+
1221  const ReduceDescriptor& descriptor,
+
1222  Optional<std::string&> reasonIfUnsupported)
+
1223 {
+
1224  TensorInfos infos{input, output};
+
1225 
+
1226  return m_LayerSupport->IsLayerSupported(LayerType::Reduce,
+
1227  infos,
+
1228  descriptor,
+
1229  EmptyOptional(),
+
1230  EmptyOptional(),
+
1231  reasonIfUnsupported);
+
1232 }
+
1233 
+
1234 bool LayerSupportHandle::IsReshapeSupported(const TensorInfo& input,
+
1235  const TensorInfo& output,
+
1236  const ReshapeDescriptor& descriptor,
+
1237  Optional<std::string&> reasonIfUnsupported)
+
1238 {
+
1239  TensorInfos infos{input, output};
+
1240 
+
1241  return m_LayerSupport->IsLayerSupported(LayerType::Reshape,
+
1242  infos,
+
1243  descriptor,
+
1244  EmptyOptional(),
+
1245  EmptyOptional(),
+
1246  reasonIfUnsupported);
+
1247 }
+
1248 
+
1249 bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
+
1250  const TensorInfo& output,
+
1251  const ResizeDescriptor& descriptor,
+
1252  Optional<std::string&> reasonIfUnsupported)
+
1253 {
+
1254  TensorInfos infos{input, output};
+
1255 
+
1256  return m_LayerSupport->IsLayerSupported(LayerType::Resize,
+
1257  infos,
+
1258  descriptor,
+
1259  EmptyOptional(),
+
1260  EmptyOptional(),
+
1261  reasonIfUnsupported);
+
1262 }
+
1263 
+
1264 bool LayerSupportHandle::IsReverseV2Supported(const armnn::TensorInfo &input0,
+
1265  const armnn::TensorInfo &input1,
+
1266  const armnn::TensorInfo &output,
+
1267  Optional<std::string &> reasonIfUnsupported)
+
1268 {
+
1269  TensorInfos infos{input0, input1, output};
+
1270 
+
1271  return m_LayerSupport->IsLayerSupported(LayerType::ReverseV2,
+
1272  infos,
+
1273  BaseDescriptor(),
+
1274  EmptyOptional(),
+
1275  EmptyOptional(),
+
1276  reasonIfUnsupported);
+
1277 }
+
1278 
+
1279 bool LayerSupportHandle::IsShapeSupported(const TensorInfo& input,
+
1280  const TensorInfo& output,
+
1281  Optional<std::string&> reasonIfUnsupported)
+
1282 {
+
1283  TensorInfos infos{input, output};
+
1284 
+
1285  return m_LayerSupport->IsLayerSupported(LayerType::Shape,
+
1286  infos,
+
1287  BaseDescriptor(),
+
1288  EmptyOptional(),
+
1289  EmptyOptional(),
+
1290  reasonIfUnsupported);
+
1291 }
+
1292 
+
1293 bool LayerSupportHandle::IsSliceSupported(const TensorInfo& input,
+
1294  const TensorInfo& output,
+
1295  const SliceDescriptor& descriptor,
+
1296  Optional<std::string&> reasonIfUnsupported)
+
1297 {
+
1298  TensorInfos infos{input, output};
+
1299 
+
1300  return m_LayerSupport->IsLayerSupported(LayerType::Slice,
+
1301  infos,
+
1302  descriptor,
+
1303  EmptyOptional(),
+
1304  EmptyOptional(),
+
1305  reasonIfUnsupported);
+
1306 }
+
1307 
+
1308 bool LayerSupportHandle::IsSoftmaxSupported(const TensorInfo& input,
+
1309  const TensorInfo& output,
+
1310  const SoftmaxDescriptor& descriptor,
+
1311  Optional<std::string&> reasonIfUnsupported)
+
1312 {
+
1313  TensorInfos infos{input, output};
+
1314 
+
1315  return m_LayerSupport->IsLayerSupported(LayerType::Softmax,
+
1316  infos,
+
1317  descriptor,
+
1318  EmptyOptional(),
+
1319  EmptyOptional(),
+
1320  reasonIfUnsupported);
+
1321 }
+
1322 
+
1323 bool LayerSupportHandle::IsSpaceToBatchNdSupported(const TensorInfo& input,
+
1324  const TensorInfo& output,
+
1325  const SpaceToBatchNdDescriptor& descriptor,
+
1326  Optional<std::string&> reasonIfUnsupported)
+
1327 {
+
1328  TensorInfos infos{input, output};
+
1329 
+
1330  return m_LayerSupport->IsLayerSupported(LayerType::SpaceToBatchNd,
+
1331  infos,
+
1332  descriptor,
+
1333  EmptyOptional(),
+
1334  EmptyOptional(),
+
1335  reasonIfUnsupported);
+
1336 }
+
1337 
+
1338 bool LayerSupportHandle::IsSpaceToDepthSupported(const TensorInfo& input,
+
1339  const TensorInfo& output,
+
1340  const SpaceToDepthDescriptor& descriptor,
+
1341  Optional<std::string&> reasonIfUnsupported)
+
1342 {
+
1343  TensorInfos infos{input, output};
+
1344 
+
1345  return m_LayerSupport->IsLayerSupported(LayerType::SpaceToDepth,
+
1346  infos,
+
1347  descriptor,
1348  EmptyOptional(),
-
1349  reasonIfUnsupported);
-
1350 }
-
1351 
-
1352 bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
-
1353  const std::vector<const TensorInfo*>& outputs,
-
1354  const StandInDescriptor& descriptor,
-
1355  Optional<std::string&> reasonIfUnsupported)
-
1356 {
-
1357  TensorInfos infos;
-
1358  for (const TensorInfo* inputInfo : inputs)
-
1359  {
-
1360  infos.push_back(*inputInfo);
-
1361  }
-
1362  for (const TensorInfo* outputInfo : outputs)
-
1363  {
-
1364  infos.push_back(*outputInfo);
-
1365  }
-
1366 
-
1367  return m_LayerSupport->IsLayerSupported(LayerType::StandIn,
-
1368  infos,
-
1369  descriptor,
-
1370  EmptyOptional(),
-
1371  EmptyOptional(),
-
1372  reasonIfUnsupported);
-
1373 }
-
1374 
-
1375 
-
1376 bool LayerSupportHandle::IsStridedSliceSupported(const TensorInfo& input,
-
1377  const TensorInfo& output,
-
1378  const StridedSliceDescriptor& descriptor,
-
1379  Optional<std::string&> reasonIfUnsupported)
-
1380 {
-
1381  TensorInfos infos{input, output};
-
1382 
-
1383  return m_LayerSupport->IsLayerSupported(LayerType::StridedSlice,
-
1384  infos,
-
1385  descriptor,
-
1386  EmptyOptional(),
+
1349  EmptyOptional(),
+
1350  reasonIfUnsupported);
+
1351 }
+
1352 
+
1353 bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
+
1354  const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+
1355  const ViewsDescriptor& descriptor,
+
1356  Optional<std::string&> reasonIfUnsupported)
+
1357 {
+
1358  TensorInfos infos{input};
+
1359  for (TensorInfo outInfo : outputs)
+
1360  {
+
1361  infos.push_back(outInfo);
+
1362  }
+
1363 
+
1364  return m_LayerSupport->IsLayerSupported(LayerType::Splitter,
+
1365  infos,
+
1366  descriptor,
+
1367  EmptyOptional(),
+
1368  EmptyOptional(),
+
1369  reasonIfUnsupported);
+
1370 }
+
1371 
+
1372 bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
+
1373  const TensorInfo& output,
+
1374  const StackDescriptor& descriptor,
+
1375  Optional<std::string&> reasonIfUnsupported)
+
1376 {
+
1377  TensorInfos infos;
+
1378  for (const TensorInfo* inputInfo : inputs)
+
1379  {
+
1380  infos.push_back(*inputInfo);
+
1381  }
+
1382  infos.push_back(output);
+
1383 
+
1384  return m_LayerSupport->IsLayerSupported(LayerType::Stack,
+
1385  infos,
+
1386  descriptor,
1387  EmptyOptional(),
-
1388  reasonIfUnsupported);
-
1389 }
-
1390 
-
1391 bool LayerSupportHandle::IsSubtractionSupported(const TensorInfo& input0,
-
1392  const TensorInfo& input1,
-
1393  const TensorInfo& output,
-
1394  Optional<std::string&> reasonIfUnsupported)
-
1395 {
-
1396  TensorInfos infos{input0, input1, output};
-
1397 
-
1398  return m_LayerSupport->IsLayerSupported(LayerType::Subtraction,
-
1399  infos,
-
1400  BaseDescriptor(),
-
1401  EmptyOptional(),
-
1402  EmptyOptional(),
-
1403  reasonIfUnsupported);
-
1404 }
-
1405 
-
1406 bool LayerSupportHandle::IsSwitchSupported(const TensorInfo& input0,
-
1407  const TensorInfo& input1,
-
1408  const TensorInfo& output0,
-
1409  const TensorInfo& output1,
-
1410  Optional<std::string&> reasonIfUnsupported)
-
1411 {
-
1412  TensorInfos infos{input0, input1, output0, output1};
-
1413 
-
1414  return m_LayerSupport->IsLayerSupported(LayerType::Switch,
-
1415  infos,
-
1416  BaseDescriptor(),
-
1417  EmptyOptional(),
-
1418  EmptyOptional(),
-
1419  reasonIfUnsupported);
-
1420 }
-
1421 
-
1422 bool LayerSupportHandle::IsTileSupported(const TensorInfo& input,
-
1423  const TensorInfo& output,
-
1424  const armnn::TileDescriptor &descriptor,
-
1425  Optional<std::string&> reasonIfUnsupported)
-
1426 {
-
1427  TensorInfos infos{input, output};
-
1428 
-
1429  return m_LayerSupport->IsLayerSupported(LayerType::Tile,
-
1430  infos,
-
1431  descriptor,
-
1432  EmptyOptional(),
-
1433  EmptyOptional(),
-
1434  reasonIfUnsupported);
-
1435 }
-
1436 
-
1437 bool LayerSupportHandle::IsTransposeConvolution2dSupported(
-
1438  const TensorInfo& input,
-
1439  const TensorInfo& output,
-
1440  const TransposeConvolution2dDescriptor& descriptor,
-
1441  const TensorInfo& weights,
-
1442  const Optional<TensorInfo>& biases,
-
1443  Optional<std::string&> reasonIfUnsupported)
-
1444 {
-
1445  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
-
1446  TensorInfos infos{input, output, weights, biasesVal};
-
1447 
-
1448  return m_LayerSupport->IsLayerSupported(LayerType::TransposeConvolution2d,
-
1449  infos,
-
1450  descriptor,
-
1451  EmptyOptional(),
-
1452  EmptyOptional(),
-
1453  reasonIfUnsupported);
-
1454 }
-
1455 
-
1456 bool LayerSupportHandle::IsTransposeSupported(const TensorInfo& input,
-
1457  const TensorInfo& output,
-
1458  const TransposeDescriptor& descriptor,
-
1459  Optional<std::string&> reasonIfUnsupported)
-
1460 {
-
1461  TensorInfos infos{input, output};
-
1462 
-
1463  return m_LayerSupport->IsLayerSupported(LayerType::Transpose,
-
1464  infos,
-
1465  descriptor,
-
1466  EmptyOptional(),
-
1467  EmptyOptional(),
-
1468  reasonIfUnsupported);
-
1469 }
-
1470 
-
1471 bool LayerSupportHandle::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
-
1472  const TensorInfo& outputStateIn,
-
1473  const TensorInfo& cellStateIn,
-
1474  const TensorInfo& outputStateOut,
-
1475  const TensorInfo& cellStateOut,
-
1476  const TensorInfo& output,
-
1477  const LstmDescriptor& descriptor,
-
1478  const LstmInputParamsInfo& paramsInfo,
-
1479  Optional<std::string&> reasonIfUnsupported)
-
1480 {
-
1481  TensorInfos infos{input, outputStateIn, cellStateIn, outputStateOut, cellStateOut, output};
-
1482 
-
1483  return m_LayerSupport->IsLayerSupported(LayerType::UnidirectionalSequenceLstm,
-
1484  infos,
-
1485  descriptor,
-
1486  paramsInfo,
-
1487  EmptyOptional(),
-
1488  reasonIfUnsupported);
-
1489 }
-
1490 
-
1491 }
+
1388  EmptyOptional(),
+
1389  reasonIfUnsupported);
+
1390 }
+
1391 
+
1392 bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
+
1393  const std::vector<const TensorInfo*>& outputs,
+
1394  const StandInDescriptor& descriptor,
+
1395  Optional<std::string&> reasonIfUnsupported)
+
1396 {
+
1397  TensorInfos infos;
+
1398  for (const TensorInfo* inputInfo : inputs)
+
1399  {
+
1400  infos.push_back(*inputInfo);
+
1401  }
+
1402  for (const TensorInfo* outputInfo : outputs)
+
1403  {
+
1404  infos.push_back(*outputInfo);
+
1405  }
+
1406 
+
1407  return m_LayerSupport->IsLayerSupported(LayerType::StandIn,
+
1408  infos,
+
1409  descriptor,
+
1410  EmptyOptional(),
+
1411  EmptyOptional(),
+
1412  reasonIfUnsupported);
+
1413 }
+
1414 
+
1415 
+
1416 bool LayerSupportHandle::IsStridedSliceSupported(const TensorInfo& input,
+
1417  const TensorInfo& output,
+
1418  const StridedSliceDescriptor& descriptor,
+
1419  Optional<std::string&> reasonIfUnsupported)
+
1420 {
+
1421  TensorInfos infos{input, output};
+
1422 
+
1423  return m_LayerSupport->IsLayerSupported(LayerType::StridedSlice,
+
1424  infos,
+
1425  descriptor,
+
1426  EmptyOptional(),
+
1427  EmptyOptional(),
+
1428  reasonIfUnsupported);
+
1429 }
+
1430 
+
1431 bool LayerSupportHandle::IsSubtractionSupported(const TensorInfo& input0,
+
1432  const TensorInfo& input1,
+
1433  const TensorInfo& output,
+
1434  Optional<std::string&> reasonIfUnsupported)
+
1435 {
+
1436  TensorInfos infos{input0, input1, output};
+
1437 
+
1438  return m_LayerSupport->IsLayerSupported(LayerType::Subtraction,
+
1439  infos,
+
1440  BaseDescriptor(),
+
1441  EmptyOptional(),
+
1442  EmptyOptional(),
+
1443  reasonIfUnsupported);
+
1444 }
+
1445 
+
1446 bool LayerSupportHandle::IsSwitchSupported(const TensorInfo& input0,
+
1447  const TensorInfo& input1,
+
1448  const TensorInfo& output0,
+
1449  const TensorInfo& output1,
+
1450  Optional<std::string&> reasonIfUnsupported)
+
1451 {
+
1452  TensorInfos infos{input0, input1, output0, output1};
+
1453 
+
1454  return m_LayerSupport->IsLayerSupported(LayerType::Switch,
+
1455  infos,
+
1456  BaseDescriptor(),
+
1457  EmptyOptional(),
+
1458  EmptyOptional(),
+
1459  reasonIfUnsupported);
+
1460 }
+
1461 
+
1462 bool LayerSupportHandle::IsTileSupported(const TensorInfo& input,
+
1463  const TensorInfo& output,
+
1464  const armnn::TileDescriptor &descriptor,
+
1465  Optional<std::string&> reasonIfUnsupported)
+
1466 {
+
1467  TensorInfos infos{input, output};
+
1468 
+
1469  return m_LayerSupport->IsLayerSupported(LayerType::Tile,
+
1470  infos,
+
1471  descriptor,
+
1472  EmptyOptional(),
+
1473  EmptyOptional(),
+
1474  reasonIfUnsupported);
+
1475 }
+
1476 
+
1477 bool LayerSupportHandle::IsTransposeConvolution2dSupported(
+
1478  const TensorInfo& input,
+
1479  const TensorInfo& output,
+
1480  const TransposeConvolution2dDescriptor& descriptor,
+
1481  const TensorInfo& weights,
+
1482  const Optional<TensorInfo>& biases,
+
1483  Optional<std::string&> reasonIfUnsupported)
+
1484 {
+
1485  TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
+
1486  TensorInfos infos{input, output, weights, biasesVal};
+
1487 
+
1488  return m_LayerSupport->IsLayerSupported(LayerType::TransposeConvolution2d,
+
1489  infos,
+
1490  descriptor,
+
1491  EmptyOptional(),
+
1492  EmptyOptional(),
+
1493  reasonIfUnsupported);
+
1494 }
+
1495 
+
1496 bool LayerSupportHandle::IsTransposeSupported(const TensorInfo& input,
+
1497  const TensorInfo& output,
+
1498  const TransposeDescriptor& descriptor,
+
1499  Optional<std::string&> reasonIfUnsupported)
+
1500 {
+
1501  TensorInfos infos{input, output};
+
1502 
+
1503  return m_LayerSupport->IsLayerSupported(LayerType::Transpose,
+
1504  infos,
+
1505  descriptor,
+
1506  EmptyOptional(),
+
1507  EmptyOptional(),
+
1508  reasonIfUnsupported);
+
1509 }
+
1510 
+
1511 bool LayerSupportHandle::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
+
1512  const TensorInfo& outputStateIn,
+
1513  const TensorInfo& cellStateIn,
+
1514  const TensorInfo& outputStateOut,
+
1515  const TensorInfo& cellStateOut,
+
1516  const TensorInfo& output,
+
1517  const LstmDescriptor& descriptor,
+
1518  const LstmInputParamsInfo& paramsInfo,
+
1519  Optional<std::string&> reasonIfUnsupported)
+
1520 {
+
1521  TensorInfos infos{input, outputStateIn, cellStateIn, outputStateOut, cellStateOut, output};
+
1522 
+
1523  return m_LayerSupport->IsLayerSupported(LayerType::UnidirectionalSequenceLstm,
+
1524  infos,
+
1525  descriptor,
+
1526  paramsInfo,
+
1527  EmptyOptional(),
+
1528  reasonIfUnsupported);
+
1529 }
+
1530 
+
1531 }
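Every IsXxxSupported member in the listing above follows the same delegation pattern: the relevant TensorInfo objects are packed into a TensorInfos vector and passed to m_LayerSupport->IsLayerSupported together with the matching LayerType and descriptor. From the caller's side, the usual flow is to obtain a LayerSupportHandle for a backend and query it before adding the corresponding layer. The following is a minimal caller-side sketch, not taken from this file; it assumes the public headers armnn/BackendHelper.hpp, armnn/Descriptors.hpp, armnn/Optional.hpp and armnn/Tensor.hpp, and uses "CpuRef" purely as an example backend id.

// Sketch: querying layer support before building a network (illustrative only).
#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <iostream>
#include <string>

int main()
{
    // "CpuRef" is only an example; any registered backend id can be used.
    armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId("CpuRef");
    if (!handle.IsBackendRegistered())
    {
        std::cout << "Backend is not registered\n";
        return 1;
    }

    armnn::TensorInfo input({1, 8}, armnn::DataType::Float32);
    armnn::TensorInfo output({1, 8}, armnn::DataType::Float32);
    armnn::SoftmaxDescriptor softmaxDesc;

    // reasonIfUnsupported is filled in by the backend when the query fails.
    std::string reason;
    bool supported = handle.IsSoftmaxSupported(input, output, softmaxDesc,
                                               armnn::Optional<std::string&>(reason));
    if (supported)
    {
        std::cout << "Softmax is supported\n";
    }
    else
    {
        std::cout << "Softmax is not supported: " << reason << "\n";
    }
    return 0;
}

The Optional<std::string&> argument is the same mechanism every query in this file forwards as reasonIfUnsupported, so a caller can surface the backend's rejection reason without extra plumbing.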
@@ -1596,64 +1636,64 @@ $(document).ready(function(){initNavTree('_backend_helper_8cpp_source.html','');
A ViewsDescriptor for the SplitterLayer.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
-
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
A FullyConnectedDescriptor for the FullyConnectedLayer.
-
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
A QLstmDescriptor for the QLstmLayer.
-
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
A QLstmDescriptor for the QLstmLayer.
+
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsUndefined() const
Definition: BackendId.hpp:141
-
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool m_BiasEnabled
Enable/disable bias.
-
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
A Pooling3dDescriptor for the Pooling3dLayer.
-
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
A ResizeDescriptor for the ResizeLayer.
+
A ResizeDescriptor for the ResizeLayer.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
-
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
A GatherDescriptor for the GatherLayer.
-
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
A GatherDescriptor for the GatherLayer.
+
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
A L2NormalizationDescriptor for the L2NormalizationLayer.
-
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
A NormalizationDescriptor for the NormalizationLayer.
-
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
A ChannelShuffleDescriptor for the ChannelShuffle operator.
-
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsReverseV2Supported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
A ChannelShuffleDescriptor for the ChannelShuffle operator.
+
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsReverseV2Supported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsElementwiseBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ElementwiseBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsElementwiseBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ElementwiseBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
A StackDescriptor for the StackLayer.
+
A StackDescriptor for the StackLayer.
-
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId &backend)
Convenience function to retrieve the ILayerSupportHandle for a backend.
bool IsConstant() const
Definition: Tensor.cpp:509
@@ -1661,38 +1701,40 @@ $(document).ready(function(){initNavTree('_backend_helper_8cpp_source.html',''); -
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
std::vector< TensorInfo > TensorInfos
-
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
#define ARMNN_LOG(severity)
Definition: Logging.hpp:212
A ElementwiseBinaryDescriptor for the ElementwiseBinaryLayer.
-
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
A FusedDescriptor for the FusedLayer.
+
bool IsFusedSupported(const std::vector< std::reference_wrapper< TensorInfo >> &inputs, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const FusedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool m_BiasEnabled
Enable/disable bias.
-
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
A PadDescriptor for the PadLayer.
+
A PadDescriptor for the PadLayer.
-
A TransposeDescriptor for the TransposeLayer.
+
A TransposeDescriptor for the TransposeLayer.
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
-
A SliceDescriptor for the SliceLayer.
+
A SliceDescriptor for the SliceLayer.
@@ -1700,77 +1742,80 @@ $(document).ready(function(){initNavTree('_backend_helper_8cpp_source.html','');
bool IsBatchMatMulSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
BackendRegistry & BackendRegistryInstance()
bool m_BiasEnabled
Enable/disable bias.
-
A ReshapeDescriptor for the ReshapeLayer.
-
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
A ReshapeDescriptor for the ReshapeLayer.
+
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsTileSupported(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsTileSupported(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool AsBool() const
Value getters.
A PermuteDescriptor for the PermuteLayer.
-
A BatchMatMulDescriptor for the BatchMatMul operator.
-
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
A BatchMatMulDescriptor for the BatchMatMul operator.
+
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
+
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
A Convolution3dDescriptor for the Convolution3dLayer.
+
bool IsBroadcastToSupported(const TensorInfo &input, const TensorInfo &output, const BroadcastToDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported)
+
Base class for all descriptors.
Definition: Descriptors.hpp:22
bool IsBackendRegistered(const BackendId &id) const
-
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool HasCapability(const std::string &name, const BackendCapabilities &capabilities)
Convenience function to check if a capability exists in a BackendCapabilities struct.
unsigned int AsUnsignedInt() const
-
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
FactoryFunction GetFactory(const BackendId &id) const
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
-
bool IsLogicalUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsLogicalUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
A Convolution2dDescriptor for the Convolution2dLayer.
-
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
std::string AsString() const
A FillDescriptor for the FillLayer.
-
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
const BackendOption & GetOption(size_t idx) const
-
A StandInDescriptor for the StandIn layer.
-
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
A StandInDescriptor for the StandIn layer.
+
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
Struct for the users to pass backend specific options.
-
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
An LstmDescriptor for the LstmLayer.
-
A StridedSliceDescriptor for the StridedSliceLayer.
+
An LstmDescriptor for the LstmLayer.
+
A StridedSliceDescriptor for the StridedSliceLayer.
-
bool IsPreCompiledSupported(const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsPreCompiledSupported(const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
+
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
bool IsBool() const
Type getters.
-
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+ +
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
@@ -1778,70 +1823,71 @@ $(document).ready(function(){initNavTree('_backend_helper_8cpp_source.html','');
size_t GetOptionCount() const noexcept
-
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
An OriginsDescriptor for the ConcatLayer.
unsigned int GetNumberOfCacheFiles(const armnn::BackendId &backend)
Returns the number of cached files if backend supports caching.
Copyright (c) 2021 ARM Limited and Contributors.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
-
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
+
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
-
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
A PreCompiledDescriptor for the PreCompiledLayer.
-
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
A PreCompiledDescriptor for the PreCompiledLayer.
+
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
A Pooling2dDescriptor for the Pooling2dLayer.
+
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
-
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
A ReduceDescriptor for the REDUCE operators.
-
bool IsSwitchSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
A ReduceDescriptor for the REDUCE operators.
+
bool IsSwitchSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
-
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsStandInSupported(const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const StandInDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
A MeanDescriptor for the MeanLayer.
-
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsStandInSupported(const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const StandInDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
A MeanDescriptor for the MeanLayer.
+
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
- +
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
A SoftmaxDescriptor for the SoftmaxLayer.
-
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
-
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
+
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
bool has_value() const noexcept
Definition: Optional.hpp:53
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
Optional< const BackendOptions::BackendOption > GetCapability(const std::string &backendCapabilityName, const BackendCapabilities &capabilities)
Returns a BackendCapability if the backend lists the capability The BackendCapability must then be in...
-
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
+
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional())
bool HasMatchingCapability(const BackendOptions::BackendOption &capability, const BackendCapabilities &capabilities)
Convenience function to check if a given capability matches a capability in a BackendCapabilities str...
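The capability helpers referenced above (HasCapability, GetCapability, HasMatchingCapability) all inspect a BackendCapabilities struct, which is a named set of BackendOption entries. A hedged sketch of how they fit together is shown below; the capability name "NonConstWeights" and the locally constructed BackendCapabilities object are illustrative assumptions, since in practice the struct is supplied by the backend implementation rather than built by hand.

// Sketch: inspecting a BackendCapabilities struct (illustrative values only).
#include <armnn/BackendHelper.hpp>
#include <armnn/BackendOptions.hpp>
#include <iostream>

int main()
{
    // Hypothetical capability set; a real one comes from the backend itself.
    armnn::BackendCapabilities capabilities("CpuRef",
    {
        {"NonConstWeights", true}
    });

    // HasCapability: is a capability with this name listed at all?
    bool listed = armnn::HasCapability("NonConstWeights", capabilities);

    // GetCapability: fetch the entry as an Optional and inspect its value.
    auto entry = armnn::GetCapability("NonConstWeights", capabilities);
    bool enabled = entry.has_value()
                   && entry.value().GetValue().IsBool()
                   && entry.value().GetValue().AsBool();

    // HasMatchingCapability: is the capability listed with exactly this value?
    armnn::BackendOptions::BackendOption expected{"NonConstWeights", true};
    bool matches = armnn::HasMatchingCapability(expected, capabilities);

    std::cout << "listed=" << listed
              << " enabled=" << enabled
              << " matches=" << matches << "\n";
    return 0;
}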