ArmNN 20.02 — WorkloadFactory.cpp (Doxygen source listing)
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <Layer.hpp>
7 #include <LayersFwd.hpp>
8 
9 #include <armnn/Types.hpp>
10 #include <armnn/LayerSupport.hpp>
11 #include <armnn/ILayerSupport.hpp>
13 
18 
20 
21 #include <boost/cast.hpp>
22 #include <boost/iterator/transform_iterator.hpp>
23 
24 #include <cstring>
25 #include <sstream>
26 
27 namespace armnn
28 {
29 
30 namespace
31 {
32 
33 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
34 {
35  if (!type)
36  {
37  return info;
38  }
39 
40  return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
41 }
42 
43 } // anonymous namespace
44 
46  const IConnectableLayer& connectableLayer,
47  Optional<DataType> dataType,
48  std::string& outReasonIfUnsupported)
49 {
50  Optional<std::string&> reason = outReasonIfUnsupported;
51  bool result;
52  const Layer& layer = *(boost::polymorphic_downcast<const Layer*>(&connectableLayer));
53 
54  auto const& backendRegistry = BackendRegistryInstance();
55  if (!backendRegistry.IsBackendRegistered(backendId))
56  {
57  std::stringstream ss;
58  ss << connectableLayer.GetName() << " is not supported on " << backendId
59  << " because this backend is not registered.";
60 
61  outReasonIfUnsupported = ss.str();
62  return false;
63  }
64 
65  auto backendFactory = backendRegistry.GetFactory(backendId);
66  auto backendObject = backendFactory();
67  auto layerSupportObject = backendObject->GetLayerSupport();
68 
69  switch(layer.GetType())
70  {
72  {
73  auto cLayer = boost::polymorphic_downcast<const ActivationLayer*>(&layer);
74  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
75  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
76  result = layerSupportObject->IsActivationSupported(
77  OverrideDataType(input, dataType),
78  OverrideDataType(output, dataType),
79  cLayer->GetParameters(),
80  reason);
81  break;
82  }
84  {
85  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
86  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
87  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
88  result = layerSupportObject->IsAdditionSupported(
89  OverrideDataType(input0, dataType),
90  OverrideDataType(input1, dataType),
91  OverrideDataType(output, dataType),
92  reason);
93  break;
94  }
96  {
97  auto cLayer = boost::polymorphic_downcast<const ArgMinMaxLayer*>(&layer);
98  const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
99 
100  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
101  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
102  result = layerSupportObject->IsArgMinMaxSupported(
103  OverrideDataType(input, dataType),
104  OverrideDataType(output, DataType::Signed32),
105  descriptor,
106  reason);
107  break;
108  }
110  {
111  auto cLayer = boost::polymorphic_downcast<const BatchNormalizationLayer*>(&layer);
112  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
113  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
114  const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
115  const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
116  const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
117  const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
118  result = layerSupportObject->IsBatchNormalizationSupported(
119  OverrideDataType(input, dataType),
120  OverrideDataType(output, dataType),
121  OverrideDataType(mean, dataType),
122  OverrideDataType(var, dataType),
123  OverrideDataType(beta, dataType),
124  OverrideDataType(gamma, dataType),
125  cLayer->GetParameters(),
126  reason);
127  break;
128  }
130  {
131  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
132  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
133  auto cLayer = boost::polymorphic_downcast<const BatchToSpaceNdLayer*>(&layer);
134 
135  result = layerSupportObject->IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
136  OverrideDataType(output, dataType),
137  cLayer->GetParameters(),
138  reason);
139  break;
140  }
142  {
143  auto cLayer = boost::polymorphic_downcast<const ComparisonLayer*>(&layer);
144 
145  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
146  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
147  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
148 
149  result = layerSupportObject->IsComparisonSupported(OverrideDataType(input0, dataType),
150  OverrideDataType(input1, dataType),
151  OverrideDataType(output, DataType::Boolean),
152  cLayer->GetParameters(),
153  reason);
154  break;
155  }
156  case LayerType::Constant:
157  {
158  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
159  result = layerSupportObject->IsConstantSupported(OverrideDataType(output, dataType), reason);
160  break;
161  }
163  {
164  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
165  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
166  result = layerSupportObject->IsConvertFp16ToFp32Supported(input, output, reason);
167  break;
168  }
170  {
171  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
172  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
173  result = layerSupportObject->IsConvertFp32ToFp16Supported(input, output, reason);
174  break;
175  }
177  {
178  auto cLayer = boost::polymorphic_downcast<const Convolution2dLayer*>(&layer);
179 
180  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
181  dataType);
182  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
183  BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
184 
185  const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
186 
187  // Construct optional biases object based on the value of m_BiasEnabled
188  Optional<TensorInfo> biases;
189  if (descriptor.m_BiasEnabled)
190  {
191  biases =
192  OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
193  }
194 
195  result = layerSupportObject->IsConvolution2dSupported(
196  input,
197  output,
198  descriptor,
199  OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
200  biases,
201  reason);
202  break;
203  }
204  case LayerType::Debug:
205  {
206  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
207  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
208 
209  result = layerSupportObject->IsDebugSupported(OverrideDataType(input, dataType),
210  OverrideDataType(output, dataType),
211  reason);
212  break;
213  }
215  {
216  auto cLayer = boost::polymorphic_downcast<const DepthToSpaceLayer*>(&layer);
217 
218  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
219  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
220 
221  result = layerSupportObject->IsDepthToSpaceSupported(OverrideDataType(input, dataType),
222  OverrideDataType(output, dataType),
223  cLayer->GetParameters(),
224  reason);
225  break;
226  }
228  {
229  auto cLayer = boost::polymorphic_downcast<const DepthwiseConvolution2dLayer*>(&layer);
230  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
231  dataType);
232  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
233  BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
234 
235  const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
236 
237  // Construct optional biases object based on the value of m_BiasEnabled
238  Optional<TensorInfo> biases;
239  if (descriptor.m_BiasEnabled)
240  {
241  biases =
242  OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
243  }
244 
245  result = layerSupportObject->IsDepthwiseConvolutionSupported(
246  input,
247  output,
248  descriptor,
249  OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
250  biases,
251  reason);
252  break;
253  }
255  {
256  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
257  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
258 
259  result = layerSupportObject->IsDequantizeSupported(input,
260  OverrideDataType(output, dataType),
261  reason);
262  break;
263  }
265  {
266  auto cLayer = boost::polymorphic_downcast<const DetectionPostProcessLayer*>(&layer);
269  const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();
270 
271  const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
272  const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
273  const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
274  const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();
275 
276  const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
277  result = layerSupportObject->IsDetectionPostProcessSupported(boxEncodings,
278  scores,
279  anchors,
280  detectionBoxes,
281  detectionClasses,
282  detectionScores,
283  numDetections,
284  descriptor,
285  reason);
286  break;
287  }
289  {
290  auto cLayer = boost::polymorphic_downcast<const ElementwiseUnaryLayer*>(&layer);
291 
292  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
293  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
294 
295  result = layerSupportObject->IsElementwiseUnarySupported(OverrideDataType(input, dataType),
296  OverrideDataType(output, dataType),
297  cLayer->GetParameters(),
298  reason);
299  break;
300  }
302  {
303  auto cLayer = boost::polymorphic_downcast<const FakeQuantizationLayer*>(&layer);
304  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
305  result = layerSupportObject->IsFakeQuantizationSupported(OverrideDataType(input, dataType),
306  cLayer->GetParameters(),
307  reason);
308  break;
309  }
310  case LayerType::Floor:
311  {
312  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
313  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
314  result = layerSupportObject->IsFloorSupported(OverrideDataType(input, dataType),
315  OverrideDataType(output, dataType),
316  reason);
317  break;
318  }
320  {
321  auto cLayer = boost::polymorphic_downcast<const FullyConnectedLayer*>(&layer);
322  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
323  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
324  BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
325 
326  TensorInfo biasInfo;
327  const TensorInfo * biasInfoPtr = nullptr;
328  static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
329  static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
330  static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
331  static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
332 
333  const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
334  if (descriptor.m_BiasEnabled)
335  {
336  BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
337  biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
338  biasInfoPtr = &biasInfo;
339  }
340  else
341  {
342  // If biases are not enabled pass a dummy tensorinfo for the validation
343  switch(input.GetDataType())
344  {
345  case DataType::BFloat16:
346  {
347  biasInfoPtr = &dummyBFloat16Bias;
348  break;
349  }
350  case DataType::Float16:
351  {
352  biasInfoPtr = &dummyFloat16Bias;
353  break;
354  }
355  case DataType::Float32:
356  {
357  biasInfoPtr = &dummyFloat32Bias;
358  break;
359  }
360  case DataType::QAsymmU8:
361  case DataType::QAsymmS8:
362  case DataType::QSymmS8:
363  case DataType::QSymmS16:
364  {
365  biasInfoPtr = &dummyQA8Bias;
366  break;
367  }
368  default:
369  {
370  BOOST_ASSERT_MSG(false, "Unexpected bias type");
371  }
372  }
373  }
374 
375  result = layerSupportObject->IsFullyConnectedSupported(
376  OverrideDataType(input, dataType),
377  OverrideDataType(output, dataType),
378  OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
379  *biasInfoPtr,
380  descriptor,
381  reason);
382  break;
383  }
384  case LayerType::Gather:
385  {
386  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
387  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
388  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
389  result = layerSupportObject->IsGatherSupported(OverrideDataType(input0, dataType),
390  input1,
391  OverrideDataType(output, dataType),
392  reason);
393  break;
394  }
395  case LayerType::Input:
396  {
397  const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
398  result = layerSupportObject->IsInputSupported(OverrideDataType(input, dataType), reason);
399  break;
400  }
402  {
403  auto cLayer = boost::polymorphic_downcast<const InstanceNormalizationLayer*>(&layer);
404  const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();
405 
406  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
407  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
408 
409  result = layerSupportObject->IsInstanceNormalizationSupported(
410  OverrideDataType(input, dataType),
411  OverrideDataType(output, dataType),
412  descriptor,
413  reason);
414  break;
415  }
417  {
418  auto cLayer = boost::polymorphic_downcast<const L2NormalizationLayer*>(&layer);
419  const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
420 
421  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
422  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
423 
424  result = layerSupportObject->IsL2NormalizationSupported(
425  OverrideDataType(input, dataType),
426  OverrideDataType(output, dataType),
427  descriptor,
428  reason);
429  break;
430  }
432  {
433  auto cLayer = boost::polymorphic_downcast<const LogSoftmaxLayer*>(&layer);
434 
435  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
436  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
437 
438  result = layerSupportObject->IsLogSoftmaxSupported(OverrideDataType(input, dataType),
439  OverrideDataType(output, dataType),
440  cLayer->GetParameters(),
441  reason);
442  break;
443  }
444  case LayerType::Lstm:
445  {
446  auto cLayer = boost::polymorphic_downcast<const LstmLayer*>(&layer);
447  const LstmDescriptor& descriptor = cLayer->GetParameters();
448 
449  // All inputs.
450  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
451  dataType);
452  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
453  dataType);
454  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
455  dataType);
456  // All outputs
457  const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
458  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
459  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
460  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);
461 
462  // Basic parameters
463  const TensorInfo& inputToForgetWeights
464  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
465  const TensorInfo& inputToCellWeights
466  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
467  const TensorInfo& inputToOutputWeights
468  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
469  const TensorInfo& recurrentToForgetWeights
470  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
471  const TensorInfo& recurrentToCellWeights
472  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
473  const TensorInfo& recurrentToOutputWeights
474  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
475  const TensorInfo& forgetGateBias
476  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
477  const TensorInfo& cellBias
478  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
479  const TensorInfo& outputGateBias
480  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
481 
482  LstmInputParamsInfo paramsInfo;
483 
484  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
485  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
486  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
487  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
488  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
489  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
490  paramsInfo.m_ForgetGateBias = &forgetGateBias;
491  paramsInfo.m_CellBias = &cellBias;
492  paramsInfo.m_OutputGateBias = &outputGateBias;
493 
494 
495  // Optional parameters
496  TensorInfo optInputToInputWeights;
497  TensorInfo optRecurrentToInputWeights;
498  TensorInfo optCellToInputWeights;
499  TensorInfo optInputGateBias;
500  TensorInfo optProjectionWeights;
501  TensorInfo optProjectionBias;
502  TensorInfo optCellToForgetWeights;
503  TensorInfo optCellToOutputWeights;
504  TensorInfo optInputLayerNormWeights;
505  TensorInfo optForgetLayerNormWeights;
506  TensorInfo optCellLayerNormWeights;
507  TensorInfo optOutputLayerNormWeights;
508 
509  if(!descriptor.m_CifgEnabled)
510  {
511  optInputToInputWeights =
512  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
513  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
514 
515  optRecurrentToInputWeights =
516  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
517  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
518  if (cLayer->m_CifgParameters.m_CellToInputWeights != nullptr)
519  {
520  optCellToInputWeights =
521  OverrideDataType(cLayer->m_CifgParameters.m_CellToInputWeights->GetTensorInfo(), dataType);
522  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
523  }
524  optInputGateBias =
525  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
526  paramsInfo.m_InputGateBias = &optInputGateBias;
527  }
528 
529  if(descriptor.m_ProjectionEnabled)
530  {
531  optProjectionWeights =
532  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
533  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
534  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
535  {
536  optProjectionBias =
537  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
538  paramsInfo.m_ProjectionBias = &optProjectionBias;
539  }
540  }
541 
542  if(descriptor.m_PeepholeEnabled)
543  {
544  optCellToForgetWeights =
545  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
546  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
547  optCellToOutputWeights =
548  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
549  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
550  }
551 
552  if(descriptor.m_LayerNormEnabled)
553  {
554  if (!descriptor.m_CifgEnabled)
555  {
556  optInputLayerNormWeights = OverrideDataType(
557  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
558  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
559  }
560 
561  optForgetLayerNormWeights = OverrideDataType(
562  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
563  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
564 
565  optCellLayerNormWeights = OverrideDataType(
566  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
567  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
568 
569  optOutputLayerNormWeights = OverrideDataType(
570  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
571  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
572  }
573 
574  result = layerSupportObject->IsLstmSupported(
575  input,
576  outputStateIn,
577  cellStateIn,
578  scratchBuffer,
579  outputStateOut,
580  cellStateOut,
581  output,
582  descriptor,
583  paramsInfo,
584  reason);
585  break;
586  }
587  case LayerType::Maximum:
588  {
589  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
590  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
591  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
592 
593  result = layerSupportObject->IsMaximumSupported(OverrideDataType(input0, dataType),
594  OverrideDataType(input1, dataType),
595  OverrideDataType(output, dataType),
596  reason);
597  break;
598  }
599  case LayerType::MemCopy:
600  {
601  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
602  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
603 
604  result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
605  OverrideDataType(output, dataType),
606  reason);
607  break;
608  }
610  {
611  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
612  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
613 
614  result = layerSupportObject->IsMemImportSupported(OverrideDataType(input, dataType),
615  OverrideDataType(output, dataType),
616  reason);
617  break;
618  }
619  case LayerType::Merge:
620  {
621  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
622  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
623  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
624 
625  result = layerSupportObject->IsMergeSupported(OverrideDataType(input0, dataType),
626  OverrideDataType(input1, dataType),
627  OverrideDataType(output, dataType),
628  reason);
629  break;
630  }
631  case LayerType::Concat:
632  {
633  auto cLayer = boost::polymorphic_downcast<const ConcatLayer*>(&layer);
634 
635  // Get vector of all inputs.
636  auto getTensorInfo = [&dataType](const InputSlot& slot)
637  {
638  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
639  };
640  auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
641  auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
642  std::vector<TensorInfo> inputs(beginI, endI);
643 
644  auto getTensorInfoPtr = [](const TensorInfo& info)
645  {
646  return &info;
647  };
648  auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
649  auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
650  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
651 
652  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
653 
654  result = layerSupportObject->IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
655 
656 
657  break;
658  }
660  {
661  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
662  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
663  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
664  result = layerSupportObject->IsMultiplicationSupported(
665  OverrideDataType(input0, dataType),
666  OverrideDataType(input1, dataType),
667  OverrideDataType(output, dataType),
668  reason);
669  break;
670  }
672  {
673  auto cLayer = boost::polymorphic_downcast<const NormalizationLayer*>(&layer);
674  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
675  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
676  result = layerSupportObject->IsNormalizationSupported(OverrideDataType(input, dataType),
677  OverrideDataType(output, dataType),
678  cLayer->GetParameters(),
679  reason);
680  break;
681  }
682  case LayerType::Output:
683  {
684  const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
685  result = layerSupportObject->IsOutputSupported(OverrideDataType(output, dataType), reason);
686  break;
687  }
688  case LayerType::Permute:
689  {
690  auto cLayer = boost::polymorphic_downcast<const PermuteLayer*>(&layer);
691  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
692  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
693  result = layerSupportObject->IsPermuteSupported(OverrideDataType(input, dataType),
694  OverrideDataType(output, dataType),
695  cLayer->GetParameters(),
696  reason);
697  break;
698  }
699  case LayerType::Pad:
700  {
701  auto cLayer = boost::polymorphic_downcast<const PadLayer*>(&layer);
702  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
703  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
704  result = layerSupportObject->IsPadSupported(
705  OverrideDataType(input, dataType),
706  OverrideDataType(output, dataType),
707  cLayer->GetParameters(),
708  reason);
709  break;
710  }
712  {
713  auto cLayer = boost::polymorphic_downcast<const Pooling2dLayer*>(&layer);
714  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
715  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
716  result = layerSupportObject->IsPooling2dSupported(OverrideDataType(input, dataType),
717  OverrideDataType(output, dataType),
718  cLayer->GetParameters(),
719  reason);
720  break;
721  }
723  {
724  auto cLayer = boost::polymorphic_downcast<const PreCompiledLayer*>(&layer);
725  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
726  result = layerSupportObject->IsPreCompiledSupported(OverrideDataType(input, dataType),
727  cLayer->GetParameters(),
728  reason);
729  break;
730  }
731  case LayerType::Quantize:
732  {
733  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
734  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
735  result = layerSupportObject->IsQuantizeSupported(input, output, reason);
736  break;
737  }
739  {
740  auto cLayer = boost::polymorphic_downcast<const QuantizedLstmLayer*>(&layer);
741 
742  // Inputs
743  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
744  const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
745  const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
746 
747  // Outputs
748  const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
749  const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
750 
751  // QuantizedLstm parameters
752  QuantizedLstmInputParamsInfo paramsInfo;
753 
754  paramsInfo.m_InputToInputWeights =
755  &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
756  paramsInfo.m_InputToForgetWeights =
757  &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
758  paramsInfo.m_InputToCellWeights =
759  &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
760  paramsInfo.m_InputToOutputWeights =
761  &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
762 
763  paramsInfo.m_RecurrentToInputWeights =
764  &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
765  paramsInfo.m_RecurrentToForgetWeights =
766  &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
767  paramsInfo.m_RecurrentToCellWeights =
768  &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
769  paramsInfo.m_RecurrentToOutputWeights =
770  &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
771 
772  paramsInfo.m_InputGateBias =
773  &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
774  paramsInfo.m_ForgetGateBias =
775  &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
776  paramsInfo.m_CellBias =
777  &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
778  paramsInfo.m_OutputGateBias =
779  &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();;
780 
781  result = layerSupportObject->IsQuantizedLstmSupported(input,
782  previousCellStateIn,
783  previousOutputIn,
784  cellStateOut,
785  output,
786  paramsInfo,
787  reason);
788  break;
789  }
790  case LayerType::Division:
791  {
792  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
793  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
794  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
795  result = layerSupportObject->IsDivisionSupported(
796  OverrideDataType(input0, dataType),
797  OverrideDataType(input1, dataType),
798  OverrideDataType(output, dataType),
799  reason);
800  break;
801  }
802  case LayerType::Reshape:
803  {
804  auto cLayer = boost::polymorphic_downcast<const ReshapeLayer*>(&layer);
805  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
806  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
807  result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType),
808  OverrideDataType(output, dataType),
809  cLayer->GetParameters(),
810  reason);
811  break;
812  }
813  case LayerType::Resize:
814  {
815  auto cLayer = boost::polymorphic_downcast<const ResizeLayer*>(&layer);
816  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
817  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
818  result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType),
819  OverrideDataType(output, dataType),
820  cLayer->GetParameters(),
821  reason);
822  break;
823  }
824  case LayerType::Slice:
825  {
826  auto cLayer = boost::polymorphic_downcast<const SliceLayer*>(&layer);
827 
828  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
829  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
830 
831  result = layerSupportObject->IsSliceSupported(OverrideDataType(input, dataType),
832  OverrideDataType(output, dataType),
833  cLayer->GetParameters(),
834  reason);
835  break;
836  }
837  case LayerType::Softmax:
838  {
839  auto cLayer = boost::polymorphic_downcast<const SoftmaxLayer*>(&layer);
840  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
841  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
842  result = layerSupportObject->IsSoftmaxSupported(OverrideDataType(input, dataType),
843  OverrideDataType(output, dataType),
844  cLayer->GetParameters(),
845  reason);
846  break;
847  }
849  {
850  auto cLayer = boost::polymorphic_downcast<const SpaceToBatchNdLayer*>(&layer);
851  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
852  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
853  result = layerSupportObject->IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
854  OverrideDataType(output, dataType),
855  cLayer->GetParameters(),
856  reason);
857  break;
858  }
860  {
861  auto cLayer = boost::polymorphic_downcast<const SpaceToDepthLayer*>(&layer);
862 
863  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
864  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
865 
866  result = layerSupportObject->IsSpaceToDepthSupported(OverrideDataType(input, dataType),
867  OverrideDataType(output, dataType),
868  cLayer->GetParameters(),
869  reason);
870  break;
871  }
872  case LayerType::Splitter:
873  {
874  auto cLayer = boost::polymorphic_downcast<const SplitterLayer*>(&layer);
875  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
876 
877  // Get vector of all outputs.
878  auto getTensorInfo = [&dataType](const OutputSlot& slot)
879  {
880  return OverrideDataType(slot.GetTensorInfo(), dataType);
881  };
882  auto beginI = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfo);
883  auto endI = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfo);
884  std::vector<TensorInfo> outputs(beginI, endI);
885 
886  const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
887 
888  result = layerSupportObject->IsSplitterSupported(OverrideDataType(input, dataType),
889  outputPtrs,
890  cLayer->GetParameters(),
891  reason);
892  break;
893  }
894  case LayerType::Stack:
895  {
896  auto cLayer = boost::polymorphic_downcast<const StackLayer*>(&layer);
897 
898  // Get vector of all inputs.
899  auto getTensorInfo = [&dataType](const InputSlot& slot)
900  {
901  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
902  };
903  auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
904  auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
905  std::vector<TensorInfo> inputs(beginI, endI);
906 
907  auto getTensorInfoPtr = [](const TensorInfo& info)
908  {
909  return &info;
910  };
911  auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
912  auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
913  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
914 
915  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
916 
917  result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
918 
919  break;
920  }
921  case LayerType::StandIn:
922  {
923  auto cLayer = boost::polymorphic_downcast<const StandInLayer*>(&layer);
924 
925  // Get vector of all inputs.
926  auto getTensorInfoIn = [&dataType](const InputSlot& slot)
927  {
928  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
929  };
930  auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
931  {
932  return OverrideDataType(slot.GetTensorInfo(), dataType);
933  };
934  auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfoIn);
935  auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfoIn);
936  std::vector<TensorInfo> inputs(beginI, endI);
937 
938  auto beginO = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
939  auto endO = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfoOut);
940  std::vector<TensorInfo> outputs(beginO, endO);
941 
942 
943  auto getTensorInfoPtr = [](const TensorInfo& info)
944  {
945  return &info;
946  };
947  auto beginPtrI = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
948  auto endPtrI = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
949  std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
950 
951  auto beginPtrO = boost::make_transform_iterator(outputs.begin(), getTensorInfoPtr);
952  auto endPtrO = boost::make_transform_iterator(outputs.end(), getTensorInfoPtr);
953  std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
954 
955 
956  result = layerSupportObject->IsStandInSupported(inputPtrs,
957  outputPtrs,
958  cLayer->GetParameters(),
959  reason);
960  break;
961  }
963  {
964  auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
965  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
966  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
967  result = layerSupportObject->IsStridedSliceSupported(OverrideDataType(input, dataType),
968  OverrideDataType(output, dataType),
969  cLayer->GetParameters(),
970  reason);
971  break;
972  }
974  {
975  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
976  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
977  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
978  result = layerSupportObject->IsSubtractionSupported(
979  OverrideDataType(input0, dataType),
980  OverrideDataType(input1, dataType),
981  OverrideDataType(output, dataType),
982  reason);
983  break;
984  }
985  case LayerType::Switch:
986  {
987  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
988  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
989  const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
990  const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
991  result = layerSupportObject->IsSwitchSupported(OverrideDataType(input0, dataType),
992  OverrideDataType(input1, dataType),
993  OverrideDataType(output0, dataType),
994  OverrideDataType(output1, dataType),
995  reason);
996  break;
997  }
998  case LayerType::Mean:
999  {
1000  auto cLayer = boost::polymorphic_downcast<const MeanLayer*>(&layer);
1001  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1002  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1003  result = layerSupportObject->IsMeanSupported(
1004  OverrideDataType(input, dataType),
1005  OverrideDataType(output, dataType),
1006  cLayer->GetParameters(),
1007  reason);
1008  break;
1009  }
1010  case LayerType::Minimum:
1011  {
1012  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1013  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1014  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1015  result = layerSupportObject->IsMinimumSupported(OverrideDataType(input0, dataType),
1016  OverrideDataType(input1, dataType),
1017  OverrideDataType(output, dataType),
1018  reason);
1019  break;
1020  }
1021  case LayerType::Prelu:
1022  {
1023  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1024  const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1025  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1026  result = layerSupportObject->IsPreluSupported(OverrideDataType(input, dataType),
1027  OverrideDataType(alpha, dataType),
1028  OverrideDataType(output, dataType),
1029  reason);
1030  break;
1031  }
1032  case LayerType::Transpose:
1033  {
1034  auto cLayer = boost::polymorphic_downcast<const TransposeLayer*>(&layer);
1035  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1036  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1037  result = layerSupportObject->IsTransposeSupported(OverrideDataType(input, dataType),
1038  OverrideDataType(output, dataType),
1039  cLayer->GetParameters(),
1040  reason);
1041  break;
1042  }
1044  {
1045  auto cLayer = boost::polymorphic_downcast<const TransposeConvolution2dLayer*>(&layer);
1046 
1047  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1048  dataType);
1049  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1050 
1051  const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();
1052 
1053  Optional<TensorInfo> biases;
1054  if (descriptor.m_BiasEnabled)
1055  {
1056  BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
1057  biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
1058  GetBiasTypeFromWeightsType(dataType));
1059  }
1060 
1061  BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
1062  const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
1063 
1064  result = layerSupportObject->IsTransposeConvolution2dSupported(input,
1065  output,
1066  descriptor,
1067  weights,
1068  biases,
1069  reason);
1070 
1071  break;
1072  }
1073  default:
1074  {
1075  BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
1076  reason.value() = "Unrecognised layer type";
1077  result = false;
1078  break;
1079  }
1080  }
1081  return result;
1082 }
1083 
1085  Optional<DataType> dataType,
1086  std::string& outReasonIfUnsupported)
1087 {
1088  auto layer = boost::polymorphic_downcast<const Layer*>(&connectableLayer);
1089  return IsLayerSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1090 }
1091 
1092 // Default Implementations
1093 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
1094  const WorkloadInfo& /*info*/) const
1095 {
1096  return std::unique_ptr<IWorkload>();
1097 }
1098 
1099 std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
1100  const WorkloadInfo& /*info*/) const
1101 {
1102  return std::unique_ptr<IWorkload>();
1103 }
1104 
1105 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
1106  const WorkloadInfo& /*info*/) const
1107 {
1108  return std::unique_ptr<IWorkload>();
1109 }
1110 
1111 std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
1112  const WorkloadInfo& /*info*/) const
1113 {
1114  return std::unique_ptr<IWorkload>();
1115 }
1116 
1118  const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1119 {
1120  return std::unique_ptr<IWorkload>();
1121 }
1122 
1124  const WorkloadInfo& /*Info*/) const
1125 {
1126  return std::unique_ptr<IWorkload>();
1127 }
1128 
1129 std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
1130  const WorkloadInfo& /*info*/) const
1131 {
1132  return std::unique_ptr<IWorkload>();
1133 }
1134 
1135 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
1136  const WorkloadInfo& /*info*/) const
1137 {
1138  return std::unique_ptr<IWorkload>();
1139 }
1140 
1141 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
1142  const WorkloadInfo& /*info*/) const
1143 {
1144  return std::unique_ptr<IWorkload>();
1145 }
1146 
1148  const WorkloadInfo& /*info*/) const
1149 {
1150  return std::unique_ptr<IWorkload>();
1151 }
1152 
1154  const WorkloadInfo& /*info*/) const
1155 {
1156  return std::unique_ptr<IWorkload>();
1157 }
1158 
1159 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
1160  const WorkloadInfo& /*info*/) const
1161 {
1162  return std::unique_ptr<IWorkload>();
1163 }
1164 
1165 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
1166  const WorkloadInfo& /*info*/) const
1167 {
1168  return std::unique_ptr<IWorkload>();
1169 }
1170 
1171 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
1172  const WorkloadInfo& /*info*/) const
1173 {
1174  return std::unique_ptr<IWorkload>();
1175 }
1176 
1178  const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1179 {
1180  return std::unique_ptr<IWorkload>();
1181 }
1182 
1183 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
1184  const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1185 {
1186  return std::unique_ptr<IWorkload>();
1187 }
1188 
1190  const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1191 {
1192  return std::unique_ptr<IWorkload>();
1193 }
1194 
1195 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
1196  const WorkloadInfo& /*info*/) const
1197 {
1198  return std::unique_ptr<IWorkload>();
1199 }
1200 
1202  const WorkloadInfo& /*info*/) const
1203 {
1204  return std::unique_ptr<IWorkload>();
1205 }
1206 
1207 std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/,
1208  const WorkloadInfo& /*Info*/) const
1209 {
1210  return std::unique_ptr<IWorkload>();
1211 }
1212 
1214  const WorkloadInfo& /*info*/) const
1215 {
1216  return std::unique_ptr<IWorkload>();
1217 }
1218 
1219 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
1220  const WorkloadInfo& /*info*/) const
1221 {
1222  return std::unique_ptr<IWorkload>();
1223 }
1224 
1225 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
1226  const WorkloadInfo& /*info*/) const
1227 {
1228  return std::unique_ptr<IWorkload>();
1229 }
1230 
1231 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
1232  const WorkloadInfo& /*info*/) const
1233 {
1234  return std::unique_ptr<IWorkload>();
1235 }
1236 
1237 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& /*descriptor*/,
1238  const WorkloadInfo& /*info*/) const
1239 {
1240  return std::unique_ptr<IWorkload>();
1241 }
1242 
1244  const InstanceNormalizationQueueDescriptor& /*descriptor*/,
1245  const WorkloadInfo& /*info*/) const
1246 {
1247  return std::unique_ptr<IWorkload>();
1248 }
1249 
1251  const WorkloadInfo& /*info*/) const
1252 {
1253  return std::unique_ptr<IWorkload>();
1254 }
1255 
1256 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
1257  const WorkloadInfo& /*info*/) const
1258 {
1259  return std::unique_ptr<IWorkload>();
1260 }
1261 
1262 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
1263  const WorkloadInfo& /*info*/) const
1264 {
1265  return std::unique_ptr<IWorkload>();
1266 }
1267 
1268 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
1269  const WorkloadInfo& /*info*/) const
1270 {
1271  return std::unique_ptr<IWorkload>();
1272 }
1273 
1274 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
1275  const WorkloadInfo& /*Info*/) const
1276 {
1277  return std::unique_ptr<IWorkload>();
1278 }
1279 
1280 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
1281  const WorkloadInfo& /*info*/) const
1282 {
1283  return std::unique_ptr<IWorkload>();
1284 }
1285 
1286 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
1287  const WorkloadInfo& /*info*/) const
1288 {
1289  return std::unique_ptr<IWorkload>();
1290 }
1291 
1292 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
1293  const WorkloadInfo& /*info*/) const
1294 {
1295  return std::unique_ptr<IWorkload>();
1296 }
1297 
1298 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& /*descriptor*/,
1299  const WorkloadInfo& /*info*/) const
1300 {
1301  return std::unique_ptr<IWorkload>();
1302 }
1303 
1304 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
1305  const WorkloadInfo& /*info*/) const
1306 {
1307  return std::unique_ptr<IWorkload>();
1308 }
1309 
1310 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
1311  const WorkloadInfo& /*info*/) const
1312 {
1313  return std::unique_ptr<IWorkload>();
1314 }
1315 
1316 std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
1317  const WorkloadInfo& /*info*/) const
1318 {
1319  return std::unique_ptr<IWorkload>();
1320 }
1321 
1322 std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
1323  const WorkloadInfo& /*info*/) const
1324 {
1325  return std::unique_ptr<IWorkload>();
1326 }
1327 
1328 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
1329  const WorkloadInfo& /*Info*/) const
1330 {
1331  return std::unique_ptr<IWorkload>();
1332 }
1333 
1334 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
1335  const WorkloadInfo& /*info*/) const
1336 {
1337  return std::unique_ptr<IWorkload>();
1338 }
1339 
1340 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
1341  const WorkloadInfo& /*info*/) const
1342 {
1343  return std::unique_ptr<IWorkload>();
1344 }
1345 
1346 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
1347  const WorkloadInfo& /*info*/) const
1348 {
1349  return std::unique_ptr<IWorkload>();
1350 }
1351 
1352 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &/*descriptor*/,
1353  const WorkloadInfo &/*info*/) const
1354 {
1355  return std::unique_ptr<IWorkload>();
1356 }
1357 
1358 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
1359  const WorkloadInfo& /*Info*/) const
1360 {
1361  return std::unique_ptr<IWorkload>();
1362 }
1363 
1364 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
1365  const WorkloadInfo& /*info*/) const
1366 {
1367  return std::unique_ptr<IWorkload>();
1368 }
1369 
1370 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
1371  const WorkloadInfo& /*info*/) const
1372 {
1373  return std::unique_ptr<IWorkload>();
1374 }
1375 
1376 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& /*descriptor*/,
1377  const WorkloadInfo& /*info*/) const
1378 {
1379  return std::unique_ptr<IWorkload>();
1380 }
1381 
1382 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
1383  const WorkloadInfo& /*info*/) const
1384 {
1385  return std::unique_ptr<IWorkload>();
1386 }
1387 
1388 std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
1389  const WorkloadInfo& /*info*/) const
1390 {
1391  return std::unique_ptr<IWorkload>();
1392 }
1393 
1394 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
1395  const WorkloadInfo& /*info*/) const
1396 {
1397  return std::unique_ptr<IWorkload>();
1398 }
1399 
1400 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
1401  const WorkloadInfo& /*info*/) const
1402 {
1403  return std::unique_ptr<IWorkload>();
1404 }
1405 
1406 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
1407  const WorkloadInfo& /*info*/) const
1408 {
1409  return std::unique_ptr<IWorkload>();
1410 }
1411 
1412 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
1413  const WorkloadInfo& /*info*/) const
1414 {
1415  return std::unique_ptr<IWorkload>();
1416 }
1417 
1418 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
1419  const WorkloadInfo& /*info*/) const
1420 {
1421  return std::unique_ptr<IWorkload>();
1422 }
1423 
1424 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
1425  const WorkloadInfo& /*info*/) const
1426 {
1427  return std::unique_ptr<IWorkload>();
1428 }
1429 
1430 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
1431  const WorkloadInfo& /*info*/) const
1432 {
1433  return std::unique_ptr<IWorkload>();
1434 }
1435 
1436 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
1437  const WorkloadInfo& /*info*/) const
1438 {
1439  return std::unique_ptr<IWorkload>();
1440 }
1441 
1442 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
1443  const WorkloadInfo& /*info*/) const
1444 {
1445  return std::unique_ptr<IWorkload>();
1446 }
1447 
1448 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/,
1449  const WorkloadInfo& /*info*/) const
1450 {
1451  return std::unique_ptr<IWorkload>();
1452 }
1453 
1455  const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
1456  const WorkloadInfo& /*info*/) const
1457 {
1458  return std::unique_ptr<IWorkload>();
1459 }
1460 
1461 } // namespace armnn
virtual std::unique_ptr< IWorkload > CreateSplitter(const SplitterQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateBatchNormalization(const BatchNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDebug(const DebugQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMemCopy(const MemCopyQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateL2Normalization(const L2NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
virtual std::unique_ptr< IWorkload > CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateMultiplication(const MultiplicationQueueDescriptor &descriptor, const WorkloadInfo &info) const
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
virtual std::unique_ptr< IWorkload > CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateGreater(const GreaterQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateArgMinMax(const ArgMinMaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMerger(const MergerQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateLogSoftmax(const LogSoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateResizeBilinear(const ResizeBilinearQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateStridedSlice(const StridedSliceQueueDescriptor &descriptor, const WorkloadInfo &Info) const
A Convolution2dDescriptor for the Convolution2dLayer.
virtual std::unique_ptr< IWorkload > CreateStack(const StackQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateLstm(const LstmQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateFakeQuantization(const FakeQuantizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateQuantizedLstm(const QuantizedLstmQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConstant(const ConstantQueueDescriptor &descriptor, const WorkloadInfo &info) const
BackendRegistry & BackendRegistryInstance()
virtual std::unique_ptr< IWorkload > CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor &descriptor, const WorkloadInfo &Info) const
std::vector< float > boxEncodings({ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f })
virtual std::unique_ptr< IWorkload > CreateAbs(const AbsQueueDescriptor &descriptor, const WorkloadInfo &info) const
Copyright (c) 2020 ARM Limited.
const std::vector< InputSlot > & GetInputSlots() const
Definition: Layer.hpp:231
virtual std::unique_ptr< IWorkload > CreateActivation(const ActivationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateRsqrt(const RsqrtQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateTranspose(const TransposeQueueDescriptor &descriptor, const WorkloadInfo &info) const
const IOutputSlot * GetConnection() const override
Definition: Layer.hpp:199
virtual std::unique_ptr< IWorkload > CreateDivision(const DivisionQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMaximum(const MaximumQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMerge(const MergeQueueDescriptor &descriptor, const WorkloadInfo &info) const
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:310
armnn::Optional< armnn::DataType > GetBiasTypeFromWeightsType(armnn::Optional< armnn::DataType > weightsType)
virtual std::unique_ptr< IWorkload > CreateEqual(const EqualQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor &descriptor, const WorkloadInfo &info) const
An LstmDescriptor for the LstmLayer.
virtual std::unique_ptr< IWorkload > CreateResize(const ResizeQueueDescriptor &descriptor, const WorkloadInfo &info) const
A L2NormalizationDescriptor for the L2NormalizationLayer.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:43
virtual std::unique_ptr< IWorkload > CreateQuantize(const QuantizeQueueDescriptor &descriptor, const WorkloadInfo &Info) const
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool m_BiasEnabled
Enable/disable bias.
const TensorInfo * m_InputToForgetWeights
Definition: LstmParams.hpp:90
virtual std::unique_ptr< IWorkload > CreateSwitch(const SwitchQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreatePad(const PadQueueDescriptor &descriptor, const WorkloadInfo &Info) const
static bool IsLayerSupported(const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
virtual std::unique_ptr< IWorkload > CreateNormalization(const NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateReshape(const ReshapeQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreatePermute(const PermuteQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateComparison(const ComparisonQueueDescriptor &descriptor, const WorkloadInfo &Info) const
const std::vector< OutputSlot > & GetOutputSlots() const
Definition: Layer.hpp:232
virtual std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSpaceToDepth(const SpaceToDepthQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateGather(const GatherQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMinimum(const MinimumQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< float > scores({ 0.0f, 0.9f, 0.8f, 0.0f, 0.75f, 0.72f, 0.0f, 0.6f, 0.5f, 0.0f, 0.93f, 0.95f, 0.0f, 0.5f, 0.4f, 0.0f, 0.3f, 0.2f })
virtual std::unique_ptr< IWorkload > CreateDepthToSpace(const DepthToSpaceQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSlice(const SliceQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateAddition(const AdditionQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMean(const MeanQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateOutput(const OutputQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerType GetType() const
Definition: Layer.hpp:259
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:312
virtual const TensorInfo & GetTensorInfo() const =0
virtual std::unique_ptr< IWorkload > CreateSoftmax(const SoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
Contains information about inputs and outputs to a layer.
virtual const char * GetName() const =0
Returns the name of the layer.
virtual std::unique_ptr< IWorkload > CreateFullyConnected(const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateFloor(const FloorQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMemImport(const MemImportQueueDescriptor &descriptor, const WorkloadInfo &info) const
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
virtual std::unique_ptr< IWorkload > CreateSubtraction(const SubtractionQueueDescriptor &descriptor, const WorkloadInfo &info) const
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:63
virtual std::unique_ptr< IWorkload > CreatePreCompiled(const PreCompiledQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor &descriptor, const WorkloadInfo &info) const
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
virtual std::unique_ptr< IWorkload > CreateConvolution2d(const Convolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreatePrelu(const PreluQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< float > anchors({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 10.5f, 1.0f, 1.0f, 0.5f, 100.5f, 1.0f, 1.0f })
virtual std::unique_ptr< IWorkload > CreateDequantize(const DequantizeQueueDescriptor &descriptor, const WorkloadInfo &info) const