ArmNN 20.05

IWorkloadFactory Class Reference
abstract

#include <WorkloadFactory.hpp>

Inheritance diagram for IWorkloadFactory:
RefWorkloadFactory, SampleDynamicWorkloadFactory, WorkloadFactoryBase, ClWorkloadFactory, NeonWorkloadFactory

Public Member Functions

virtual ~IWorkloadFactory ()
 
virtual const BackendId & GetBackendId () const =0

virtual bool SupportsSubTensors () const =0

virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle (ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0

virtual std::unique_ptr< IWorkload > CreateInput (const InputQueueDescriptor &descriptor, const WorkloadInfo &info) const =0

virtual std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0

virtual std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, DataLayout dataLayout, const bool IsMemoryManaged=true) const =0

virtual std::unique_ptr< IWorkload > CreateAbs (const AbsQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateActivation (const ActivationQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateAddition (const AdditionQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateArgMinMax (const ArgMinMaxQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateBatchNormalization (const BatchNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateBatchToSpaceNd (const BatchToSpaceNdQueueDescriptor &descriptor, const WorkloadInfo &Info) const

virtual std::unique_ptr< IWorkload > CreateComparison (const ComparisonQueueDescriptor &descriptor, const WorkloadInfo &Info) const

virtual std::unique_ptr< IWorkload > CreateConcat (const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateConstant (const ConstantQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateConvertBf16ToFp32 (const ConvertBf16ToFp32QueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateConvertFp16ToFp32 (const ConvertFp16ToFp32QueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateConvertFp32ToBf16 (const ConvertFp32ToBf16QueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateConvertFp32ToFp16 (const ConvertFp32ToFp16QueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateConvolution2d (const Convolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateDebug (const DebugQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateDepthToSpace (const DepthToSpaceQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d (const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateDequantize (const DequantizeQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateDetectionPostProcess (const DetectionPostProcessQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateDivision (const DivisionQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateElementwiseUnary (const ElementwiseUnaryQueueDescriptor &descriptor, const WorkloadInfo &Info) const

virtual std::unique_ptr< IWorkload > CreateEqual (const EqualQueueDescriptor &descriptor, const WorkloadInfo &Info) const

virtual std::unique_ptr< IWorkload > CreateFakeQuantization (const FakeQuantizationQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateFloor (const FloorQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateFullyConnected (const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateGather (const GatherQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateGreater (const GreaterQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateInstanceNormalization (const InstanceNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateL2Normalization (const L2NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateLogSoftmax (const LogSoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateLstm (const LstmQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateMaximum (const MaximumQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateMean (const MeanQueueDescriptor &descriptor, const WorkloadInfo &Info) const

virtual std::unique_ptr< IWorkload > CreateMemCopy (const MemCopyQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateMemImport (const MemImportQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateMerge (const MergeQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateMerger (const MergerQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateMinimum (const MinimumQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateMultiplication (const MultiplicationQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateNormalization (const NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateOutput (const OutputQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreatePad (const PadQueueDescriptor &descriptor, const WorkloadInfo &Info) const

virtual std::unique_ptr< IWorkload > CreatePermute (const PermuteQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreatePooling2d (const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreatePreCompiled (const PreCompiledQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreatePrelu (const PreluQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateQuantize (const QuantizeQueueDescriptor &descriptor, const WorkloadInfo &Info) const

virtual std::unique_ptr< IWorkload > CreateQLstm (const QLstmQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateQuantizedLstm (const QuantizedLstmQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateReshape (const ReshapeQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateResize (const ResizeQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateResizeBilinear (const ResizeBilinearQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateRsqrt (const RsqrtQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateSlice (const SliceQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateSoftmax (const SoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateSpaceToBatchNd (const SpaceToBatchNdQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateSpaceToDepth (const SpaceToDepthQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateSubtraction (const SubtractionQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateSplitter (const SplitterQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateStack (const StackQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateStridedSlice (const StridedSliceQueueDescriptor &descriptor, const WorkloadInfo &Info) const

virtual std::unique_ptr< IWorkload > CreateSwitch (const SwitchQueueDescriptor &descriptor, const WorkloadInfo &Info) const

virtual std::unique_ptr< IWorkload > CreateTranspose (const TransposeQueueDescriptor &descriptor, const WorkloadInfo &info) const

virtual std::unique_ptr< IWorkload > CreateTransposeConvolution2d (const TransposeConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
 

Static Public Member Functions

static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
 

Detailed Description

Definition at line 21 of file WorkloadFactory.hpp.
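
IWorkloadFactory is the interface a backend implements to create the tensor handles and workloads that execute a network's layers (see the pure virtual GetBackendId(), CreateTensorHandle(), CreateSubTensorHandle() and CreateInput() members above). A minimal usage sketch follows; it is illustrative only and not taken from the ArmNN sources, and the header paths and ReLu activation choice are assumptions.

// Illustrative sketch: ask a concrete factory for an Activation workload.
#include <memory>
#include <armnn/Tensor.hpp>                      // header paths are assumed; they differ between ArmNN versions
#include <backendsCommon/WorkloadFactory.hpp>

std::unique_ptr<armnn::IWorkload> MakeReluWorkload(armnn::IWorkloadFactory& factory,
                                                   const armnn::TensorInfo& inputInfo,
                                                   const armnn::TensorInfo& outputInfo)
{
    armnn::ActivationQueueDescriptor descriptor;               // layer parameters for the workload
    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::ReLu;

    armnn::WorkloadInfo info;                                  // tensor shapes/types the workload will see
    info.m_InputTensorInfos  = { inputInfo };
    info.m_OutputTensorInfos = { outputInfo };

    // The factory picks the backend-specific workload type; tensor handles created via
    // CreateTensorHandle() would be attached to descriptor.m_Inputs/m_Outputs before Execute().
    return factory.CreateActivation(descriptor, info);
}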

Constructor & Destructor Documentation

◆ ~IWorkloadFactory()

virtual ~IWorkloadFactory ( )
inline virtual

Definition at line 24 of file WorkloadFactory.hpp.

References ARMNN_DEPRECATED_MSG, IWorkloadFactory::CreateAbs(), IWorkloadFactory::CreateActivation(), IWorkloadFactory::CreateAddition(), IWorkloadFactory::CreateArgMinMax(), IWorkloadFactory::CreateBatchNormalization(), IWorkloadFactory::CreateBatchToSpaceNd(), IWorkloadFactory::CreateComparison(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateConstant(), IWorkloadFactory::CreateConvertBf16ToFp32(), IWorkloadFactory::CreateConvertFp16ToFp32(), IWorkloadFactory::CreateConvertFp32ToBf16(), IWorkloadFactory::CreateConvertFp32ToFp16(), IWorkloadFactory::CreateConvolution2d(), IWorkloadFactory::CreateDebug(), IWorkloadFactory::CreateDepthToSpace(), IWorkloadFactory::CreateDepthwiseConvolution2d(), IWorkloadFactory::CreateDequantize(), IWorkloadFactory::CreateDetectionPostProcess(), IWorkloadFactory::CreateDivision(), IWorkloadFactory::CreateElementwiseUnary(), IWorkloadFactory::CreateEqual(), IWorkloadFactory::CreateFakeQuantization(), IWorkloadFactory::CreateFloor(), IWorkloadFactory::CreateFullyConnected(), IWorkloadFactory::CreateGather(), IWorkloadFactory::CreateGreater(), IWorkloadFactory::CreateInput(), IWorkloadFactory::CreateInstanceNormalization(), IWorkloadFactory::CreateL2Normalization(), IWorkloadFactory::CreateLogSoftmax(), IWorkloadFactory::CreateLstm(), IWorkloadFactory::CreateMaximum(), IWorkloadFactory::CreateMean(), IWorkloadFactory::CreateMemCopy(), IWorkloadFactory::CreateMemImport(), IWorkloadFactory::CreateMerge(), IWorkloadFactory::CreateMerger(), IWorkloadFactory::CreateMinimum(), IWorkloadFactory::CreateMultiplication(), IWorkloadFactory::CreateNormalization(), IWorkloadFactory::CreateOutput(), IWorkloadFactory::CreatePad(), IWorkloadFactory::CreatePermute(), IWorkloadFactory::CreatePooling2d(), IWorkloadFactory::CreatePreCompiled(), IWorkloadFactory::CreatePrelu(), IWorkloadFactory::CreateQLstm(), IWorkloadFactory::CreateQuantize(), IWorkloadFactory::CreateQuantizedLstm(), IWorkloadFactory::CreateReshape(), IWorkloadFactory::CreateResize(), IWorkloadFactory::CreateResizeBilinear(), IWorkloadFactory::CreateRsqrt(), IWorkloadFactory::CreateSlice(), IWorkloadFactory::CreateSoftmax(), IWorkloadFactory::CreateSpaceToBatchNd(), IWorkloadFactory::CreateSpaceToDepth(), IWorkloadFactory::CreateSplitter(), IWorkloadFactory::CreateStack(), IWorkloadFactory::CreateStridedSlice(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateSubtraction(), IWorkloadFactory::CreateSwitch(), IWorkloadFactory::CreateTensorHandle(), IWorkloadFactory::CreateTranspose(), IWorkloadFactory::CreateTransposeConvolution2d(), IWorkloadFactory::GetBackendId(), armnn::Info, armnn::info, IWorkloadFactory::IsLayerSupported(), and IWorkloadFactory::SupportsSubTensors().

24 { }

Member Function Documentation

◆ CreateAbs()

std::unique_ptr< IWorkload > CreateAbs ( const AbsQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, ClWorkloadFactory, NeonWorkloadFactory, and WorkloadFactoryBase.

Definition at line 1201 of file WorkloadFactory.cpp.

Referenced by AbsLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1203 {
1204  return std::unique_ptr<IWorkload>();
1205 }
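
All of the non-pure-virtual Create* functions share this default body: they return an empty std::unique_ptr<IWorkload>, so a backend only overrides the creators for the layers it actually supports. A hypothetical override is sketched below; MyWorkloadFactory and MyAbsWorkload are illustrative names and not part of ArmNN, headers are omitted, and the remaining pure virtual members must still be implemented.

// Hypothetical backend factory overriding a single creator; anything it does not
// override falls back to IWorkloadFactory's default and returns an empty pointer.
class MyWorkloadFactory : public armnn::IWorkloadFactory
{
public:
    std::unique_ptr<armnn::IWorkload> CreateAbs(const armnn::AbsQueueDescriptor& descriptor,
                                                const armnn::WorkloadInfo& info) const override
    {
        // MyAbsWorkload is an assumed IWorkload subclass provided by this backend.
        return std::make_unique<MyAbsWorkload>(descriptor, info);
    }
    // GetBackendId(), SupportsSubTensors(), CreateInput(), CreateTensorHandle() and
    // CreateSubTensorHandle() are pure virtual and must also be implemented (omitted here).
};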

◆ CreateActivation()

std::unique_ptr< IWorkload > CreateActivation ( const ActivationQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

◆ CreateAddition()

std::unique_ptr< IWorkload > CreateAddition ( const AdditionQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

◆ CreateArgMinMax()

std::unique_ptr< IWorkload > CreateArgMinMax ( const ArgMinMaxQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, ClWorkloadFactory, NeonWorkloadFactory, and WorkloadFactoryBase.

Definition at line 1219 of file WorkloadFactory.cpp.

Referenced by ArgMinMaxLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1221 {
1222  return std::unique_ptr<IWorkload>();
1223 }

◆ CreateBatchNormalization()

std::unique_ptr< IWorkload > CreateBatchNormalization ( const BatchNormalizationQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, ClWorkloadFactory, NeonWorkloadFactory, and WorkloadFactoryBase.

Definition at line 1225 of file WorkloadFactory.cpp.

Referenced by CompareBatchNormTest(), BatchNormalizationLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1227 {
1228  return std::unique_ptr<IWorkload>();
1229 }

◆ CreateBatchToSpaceNd()

std::unique_ptr< IWorkload > CreateBatchToSpaceNd ( const BatchToSpaceNdQueueDescriptor & descriptor,
const WorkloadInfo & Info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, ClWorkloadFactory, and NeonWorkloadFactory.

Definition at line 1231 of file WorkloadFactory.cpp.

Referenced by BatchToSpaceNdLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1233 {
1234  return std::unique_ptr<IWorkload>();
1235 }

◆ CreateComparison()

std::unique_ptr< IWorkload > CreateComparison ( const ComparisonQueueDescriptor & descriptor,
const WorkloadInfo & Info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, ClWorkloadFactory, and NeonWorkloadFactory.

Definition at line 1237 of file WorkloadFactory.cpp.

Referenced by ComparisonLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1239 {
1240  return std::unique_ptr<IWorkload>();
1241 }

◆ CreateConcat()

std::unique_ptr< IWorkload > CreateConcat ( const ConcatQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

◆ CreateConstant()

std::unique_ptr< IWorkload > CreateConstant ( const ConstantQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, ClWorkloadFactory, and NeonWorkloadFactory.

Definition at line 1249 of file WorkloadFactory.cpp.

Referenced by ConstantLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1251 {
1252  return std::unique_ptr<IWorkload>();
1253 }

◆ CreateConvertBf16ToFp32()

std::unique_ptr< IWorkload > CreateConvertBf16ToFp32 ( const ConvertBf16ToFp32QueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, and NeonWorkloadFactory.

Definition at line 1255 of file WorkloadFactory.cpp.

Referenced by ConvertBf16ToFp32Test(), ConvertBf16ToFp32Layer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1257 {
1258  return std::unique_ptr<IWorkload>();
1259 }

◆ CreateConvertFp16ToFp32()

std::unique_ptr< IWorkload > CreateConvertFp16ToFp32 ( const ConvertFp16ToFp32QueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1261 of file WorkloadFactory.cpp.

Referenced by ConvertFp16ToFp32Layer::CreateWorkload(), SimpleConvertFp16ToFp32Test(), and IWorkloadFactory::~IWorkloadFactory().

1263 {
1264  return std::unique_ptr<IWorkload>();
1265 }

◆ CreateConvertFp32ToBf16()

std::unique_ptr< IWorkload > CreateConvertFp32ToBf16 ( const ConvertFp32ToBf16QueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, and NeonWorkloadFactory.

Definition at line 1267 of file WorkloadFactory.cpp.

Referenced by ConvertFp32ToBf16Test(), ConvertFp32ToBf16Layer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1269 {
1270  return std::unique_ptr<IWorkload>();
1271 }

◆ CreateConvertFp32ToFp16()

std::unique_ptr< IWorkload > CreateConvertFp32ToFp16 ( const ConvertFp32ToFp16QueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, NeonWorkloadFactory, WorkloadFactoryBase, and ClWorkloadFactory.

Definition at line 1273 of file WorkloadFactory.cpp.

Referenced by ConvertFp32ToFp16Layer::CreateWorkload(), SimpleConvertFp32ToFp16Test(), and IWorkloadFactory::~IWorkloadFactory().

1275 {
1276  return std::unique_ptr<IWorkload>();
1277 }

◆ CreateConvolution2d()

std::unique_ptr< IWorkload > CreateConvolution2d ( const Convolution2dQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

◆ CreateDebug()

std::unique_ptr< IWorkload > CreateDebug ( const DebugQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1285 of file WorkloadFactory.cpp.

Referenced by DebugLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1287 {
1288  return std::unique_ptr<IWorkload>();
1289 }

◆ CreateDepthToSpace()

std::unique_ptr< IWorkload > CreateDepthToSpace ( const DepthToSpaceQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1291 of file WorkloadFactory.cpp.

Referenced by DepthToSpaceLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1293 {
1294  return std::unique_ptr<IWorkload>();
1295 }

◆ CreateDepthwiseConvolution2d()

std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d ( const DepthwiseConvolution2dQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

◆ CreateDequantize()

std::unique_ptr< IWorkload > CreateDequantize ( const DequantizeQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1303 of file WorkloadFactory.cpp.

Referenced by DequantizeLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1305 {
1306  return std::unique_ptr<IWorkload>();
1307 }

◆ CreateDetectionPostProcess()

std::unique_ptr< IWorkload > CreateDetectionPostProcess ( const DetectionPostProcessQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1309 of file WorkloadFactory.cpp.

Referenced by DetectionPostProcessLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1311 {
1312  return std::unique_ptr<IWorkload>();
1313 }

◆ CreateDivision()

std::unique_ptr< IWorkload > CreateDivision ( const DivisionQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1315 of file WorkloadFactory.cpp.

Referenced by DivisionLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1317 {
1318  return std::unique_ptr<IWorkload>();
1319 }

◆ CreateElementwiseUnary()

std::unique_ptr< IWorkload > CreateElementwiseUnary ( const ElementwiseUnaryQueueDescriptor & descriptor,
const WorkloadInfo & Info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1321 of file WorkloadFactory.cpp.

Referenced by ElementwiseUnaryLayer::CreateWorkload(), CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1323 {
1324  return std::unique_ptr<IWorkload>();
1325 }

◆ CreateEqual()

std::unique_ptr< IWorkload > CreateEqual ( const EqualQueueDescriptor & descriptor,
const WorkloadInfo & Info 
) const
virtual

Reimplemented in RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1327 of file WorkloadFactory.cpp.

Referenced by IWorkloadFactory::~IWorkloadFactory().

1329 {
1330  return std::unique_ptr<IWorkload>();
1331 }

◆ CreateFakeQuantization()

std::unique_ptr< IWorkload > CreateFakeQuantization ( const FakeQuantizationQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, and WorkloadFactoryBase.

Definition at line 1333 of file WorkloadFactory.cpp.

Referenced by FakeQuantizationLayer::CreateWorkload(), FakeQuantizationTest(), and IWorkloadFactory::~IWorkloadFactory().

1335 {
1336  return std::unique_ptr<IWorkload>();
1337 }

◆ CreateFloor()

std::unique_ptr< IWorkload > CreateFloor ( const FloorQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1339 of file WorkloadFactory.cpp.

Referenced by FloorLayer::CreateWorkload(), SimpleFloorTest(), and IWorkloadFactory::~IWorkloadFactory().

1341 {
1342  return std::unique_ptr<IWorkload>();
1343 }

◆ CreateFullyConnected()

std::unique_ptr< IWorkload > CreateFullyConnected ( const FullyConnectedQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1345 of file WorkloadFactory.cpp.

Referenced by FullyConnectedLayer::CreateWorkload(), SimpleFullyConnectedTestImpl(), and IWorkloadFactory::~IWorkloadFactory().

1347 {
1348  return std::unique_ptr<IWorkload>();
1349 }

◆ CreateGather()

std::unique_ptr< IWorkload > CreateGather ( const GatherQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1351 of file WorkloadFactory.cpp.

Referenced by GatherLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1353 {
1354  return std::unique_ptr<IWorkload>();
1355 }

◆ CreateGreater()

std::unique_ptr< IWorkload > CreateGreater ( const GreaterQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1357 of file WorkloadFactory.cpp.

Referenced by IWorkloadFactory::~IWorkloadFactory().

1359 {
1360  return std::unique_ptr<IWorkload>();
1361 }

◆ CreateInput()

virtual std::unique_ptr<IWorkload> CreateInput ( const InputQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
pure virtual

◆ CreateInstanceNormalization()

std::unique_ptr< IWorkload > CreateInstanceNormalization ( const InstanceNormalizationQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1363 of file WorkloadFactory.cpp.

Referenced by InstanceNormalizationLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1366 {
1367  return std::unique_ptr<IWorkload>();
1368 }

◆ CreateL2Normalization()

std::unique_ptr< IWorkload > CreateL2Normalization ( const L2NormalizationQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1370 of file WorkloadFactory.cpp.

Referenced by L2NormalizationLayer::CreateWorkload(), L2Normalization2dShapeTest(), and IWorkloadFactory::~IWorkloadFactory().

1372 {
1373  return std::unique_ptr<IWorkload>();
1374 }

◆ CreateLogSoftmax()

std::unique_ptr< IWorkload > CreateLogSoftmax ( const LogSoftmaxQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, and WorkloadFactoryBase.

Definition at line 1376 of file WorkloadFactory.cpp.

Referenced by LogSoftmaxLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1378 {
1379  return std::unique_ptr<IWorkload>();
1380 }

◆ CreateLstm()

std::unique_ptr< IWorkload > CreateLstm ( const LstmQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1382 of file WorkloadFactory.cpp.

Referenced by LstmLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1384 {
1385  return std::unique_ptr<IWorkload>();
1386 }

◆ CreateMaximum()

std::unique_ptr< IWorkload > CreateMaximum ( const MaximumQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1388 of file WorkloadFactory.cpp.

Referenced by MaximumLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1390 {
1391  return std::unique_ptr<IWorkload>();
1392 }

◆ CreateMean()

std::unique_ptr< IWorkload > CreateMean ( const MeanQueueDescriptor & descriptor,
const WorkloadInfo & Info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1394 of file WorkloadFactory.cpp.

Referenced by MeanLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1396 {
1397  return std::unique_ptr<IWorkload>();
1398 }

◆ CreateMemCopy()

std::unique_ptr< IWorkload > CreateMemCopy ( const MemCopyQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1400 of file WorkloadFactory.cpp.

Referenced by IWorkloadFactory::~IWorkloadFactory().

1402 {
1403  return std::unique_ptr<IWorkload>();
1404 }

◆ CreateMemImport()

std::unique_ptr< IWorkload > CreateMemImport ( const MemImportQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1406 of file WorkloadFactory.cpp.

Referenced by IWorkloadFactory::~IWorkloadFactory().

1408 {
1409  return std::unique_ptr<IWorkload>();
1410 }

◆ CreateMerge()

std::unique_ptr< IWorkload > CreateMerge ( const MergeQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase.

Definition at line 1412 of file WorkloadFactory.cpp.

Referenced by IWorkloadFactory::~IWorkloadFactory().

1414 {
1415  return std::unique_ptr<IWorkload>();
1416 }

◆ CreateMerger()

std::unique_ptr< IWorkload > CreateMerger ( const MergerQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1418 of file WorkloadFactory.cpp.

Referenced by IWorkloadFactory::~IWorkloadFactory().

1420 {
1421  return std::unique_ptr<IWorkload>();
1422 }

◆ CreateMinimum()

std::unique_ptr< IWorkload > CreateMinimum ( const MinimumQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1424 of file WorkloadFactory.cpp.

Referenced by MinimumLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1426 {
1427  return std::unique_ptr<IWorkload>();
1428 }

◆ CreateMultiplication()

std::unique_ptr< IWorkload > CreateMultiplication ( const MultiplicationQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1430 of file WorkloadFactory.cpp.

Referenced by CompareMultiplicationTest(), MultiplicationLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1432 {
1433  return std::unique_ptr<IWorkload>();
1434 }

◆ CreateNormalization()

std::unique_ptr< IWorkload > CreateNormalization ( const NormalizationQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1436 of file WorkloadFactory.cpp.

Referenced by NormalizationLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1438 {
1439  return std::unique_ptr<IWorkload>();
1440 }

◆ CreateOutput()

std::unique_ptr< IWorkload > CreateOutput ( const OutputQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, ClWorkloadFactory, and SampleDynamicWorkloadFactory.

Definition at line 1442 of file WorkloadFactory.cpp.

Referenced by IWorkloadFactory::~IWorkloadFactory().

1444 {
1445  return std::unique_ptr<IWorkload>();
1446 }

◆ CreatePad()

std::unique_ptr< IWorkload > CreatePad ( const PadQueueDescriptor & descriptor,
const WorkloadInfo & Info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1448 of file WorkloadFactory.cpp.

Referenced by PadLayer::CreateWorkload(), Pad2dTestCommon(), Pad3dTestCommon(), Pad4dTestCommon(), and IWorkloadFactory::~IWorkloadFactory().

1450 {
1451  return std::unique_ptr<IWorkload>();
1452 }

◆ CreatePermute()

std::unique_ptr< IWorkload > CreatePermute ( const PermuteQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1454 of file WorkloadFactory.cpp.

Referenced by PermuteLayer::CreateWorkload(), PermuteTensorData(), SimplePermuteTestImpl(), and IWorkloadFactory::~IWorkloadFactory().

1456 {
1457  return std::unique_ptr<IWorkload>();
1458 }

◆ CreatePooling2d()

std::unique_ptr< IWorkload > CreatePooling2d ( const Pooling2dQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1460 of file WorkloadFactory.cpp.

Referenced by AdditionAfterMaxPoolTest(), Pooling2dLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1462 {
1463  return std::unique_ptr<IWorkload>();
1464 }

◆ CreatePreCompiled()

std::unique_ptr< IWorkload > CreatePreCompiled ( const PreCompiledQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1466 of file WorkloadFactory.cpp.

Referenced by PreCompiledLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1468 {
1469  return std::unique_ptr<IWorkload>();
1470 }

◆ CreatePrelu()

std::unique_ptr< IWorkload > CreatePrelu ( const PreluQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1472 of file WorkloadFactory.cpp.

Referenced by PreluLayer::CreateWorkload(), PreluTest(), and IWorkloadFactory::~IWorkloadFactory().

1474 {
1475  return std::unique_ptr<IWorkload>();
1476 }

◆ CreateQLstm()

std::unique_ptr< IWorkload > CreateQLstm ( const QLstmQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1484 of file WorkloadFactory.cpp.

Referenced by QLstmLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1486 {
1487  return std::unique_ptr<IWorkload>();
1488 }

◆ CreateQuantize()

std::unique_ptr< IWorkload > CreateQuantize ( const QuantizeQueueDescriptor & descriptor,
const WorkloadInfo & Info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1478 of file WorkloadFactory.cpp.

Referenced by QuantizeLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1480 {
1481  return std::unique_ptr<IWorkload>();
1482 }

◆ CreateQuantizedLstm()

std::unique_ptr< IWorkload > CreateQuantizedLstm ( const QuantizedLstmQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1490 of file WorkloadFactory.cpp.

Referenced by QuantizedLstmLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1492 {
1493  return std::unique_ptr<IWorkload>();
1494 }

◆ CreateReshape()

std::unique_ptr< IWorkload > CreateReshape ( const ReshapeQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1496 of file WorkloadFactory.cpp.

Referenced by ReshapeLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1498 {
1499  return std::unique_ptr<IWorkload>();
1500 }

◆ CreateResize()

std::unique_ptr< IWorkload > CreateResize ( const ResizeQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1508 of file WorkloadFactory.cpp.

Referenced by ResizeLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1510 {
1511  return std::unique_ptr<IWorkload>();
1512 }

◆ CreateResizeBilinear()

std::unique_ptr< IWorkload > CreateResizeBilinear ( const ResizeBilinearQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1502 of file WorkloadFactory.cpp.

Referenced by IWorkloadFactory::~IWorkloadFactory().

1504 {
1505  return std::unique_ptr<IWorkload>();
1506 }

◆ CreateRsqrt()

std::unique_ptr< IWorkload > CreateRsqrt ( const RsqrtQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1514 of file WorkloadFactory.cpp.

Referenced by RsqrtLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1516 {
1517  return std::unique_ptr<IWorkload>();
1518 }

◆ CreateSlice()

std::unique_ptr< IWorkload > CreateSlice ( const SliceQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1520 of file WorkloadFactory.cpp.

Referenced by SliceLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1522 {
1523  return std::unique_ptr<IWorkload>();
1524 }

◆ CreateSoftmax()

std::unique_ptr< IWorkload > CreateSoftmax ( const SoftmaxQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1526 of file WorkloadFactory.cpp.

Referenced by SoftmaxLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1528 {
1529  return std::unique_ptr<IWorkload>();
1530 }

◆ CreateSpaceToBatchNd()

std::unique_ptr< IWorkload > CreateSpaceToBatchNd ( const SpaceToBatchNdQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1538 of file WorkloadFactory.cpp.

Referenced by SpaceToBatchNdLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1540 {
1541  return std::unique_ptr<IWorkload>();
1542 }

◆ CreateSpaceToDepth()

std::unique_ptr< IWorkload > CreateSpaceToDepth ( const SpaceToDepthQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1544 of file WorkloadFactory.cpp.

Referenced by SpaceToDepthLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1546 {
1547  return std::unique_ptr<IWorkload>();
1548 }

◆ CreateSplitter()

std::unique_ptr< IWorkload > CreateSplitter ( const SplitterQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1532 of file WorkloadFactory.cpp.

Referenced by SplitterLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1534 {
1535  return std::unique_ptr<IWorkload>();
1536 }

◆ CreateStack()

std::unique_ptr< IWorkload > CreateStack ( const StackQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1550 of file WorkloadFactory.cpp.

Referenced by StackLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1552 {
1553  return std::unique_ptr<IWorkload>();
1554 }

◆ CreateStridedSlice()

std::unique_ptr< IWorkload > CreateStridedSlice ( const StridedSliceQueueDescriptor & descriptor,
const WorkloadInfo & Info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1556 of file WorkloadFactory.cpp.

Referenced by StridedSliceLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1558 {
1559  return std::unique_ptr<IWorkload>();
1560 }

◆ CreateSubTensorHandle()

virtual std::unique_ptr<ITensorHandle> CreateSubTensorHandle ( ITensorHandle & parent,
TensorShape const &  subTensorShape,
unsigned int const *  subTensorOrigin 
) const
pure virtual
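
Sub-tensor handles let a backend alias a region of an existing tensor instead of copying it, which is only meaningful when SupportsSubTensors() returns true. A minimal sketch, assuming such a factory (headers omitted; the shapes and offsets are illustrative):

// Illustrative only: create a view of the first half of a {2, 8} parent tensor.
void MakeSubTensorView(armnn::IWorkloadFactory& factory, armnn::ITensorHandle& parentHandle)
{
    if (!factory.SupportsSubTensors())
    {
        return; // this backend cannot alias sub-regions of a parent tensor
    }

    const armnn::TensorShape subShape({1, 8});
    const unsigned int subOrigin[] = {0, 0};   // offset of the sub-tensor within the parent

    std::unique_ptr<armnn::ITensorHandle> subHandle =
        factory.CreateSubTensorHandle(parentHandle, subShape, subOrigin);
    // subHandle now aliases the parent's memory and would typically be attached to a
    // neighbouring workload's descriptor instead of a freshly allocated tensor handle.
}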

◆ CreateSubtraction()

std::unique_ptr< IWorkload > CreateSubtraction ( const SubtractionQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1562 of file WorkloadFactory.cpp.

Referenced by SubtractionLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1564 {
1565  return std::unique_ptr<IWorkload>();
1566 }

◆ CreateSwitch()

std::unique_ptr< IWorkload > CreateSwitch ( const SwitchQueueDescriptor & descriptor,
const WorkloadInfo & Info 
) const
virtual

Reimplemented in WorkloadFactoryBase.

Definition at line 1568 of file WorkloadFactory.cpp.

Referenced by SwitchLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

1570 {
1571  return std::unique_ptr<IWorkload>();
1572 }

◆ CreateTensorHandle() [1/2]

virtual std::unique_ptr<ITensorHandle> CreateTensorHandle ( const TensorInfo & tensorInfo,
const bool  IsMemoryManaged = true 
) const
pure virtual

Implemented in RefWorkloadFactory, SampleDynamicWorkloadFactory, ClWorkloadFactory, NeonWorkloadFactory, and WorkloadFactoryBase.

Referenced by AdditionAfterMaxPoolTest(), AdditionBroadcast1ElementTestImpl(), AdditionBroadcastTestImpl(), BoundedReLuTestCommon(), BoundedReLuUint8UpperAndLowerBoundTest(), CompareActivationTestImpl(), CompareAdditionTest(), CompareBatchNormTest(), CompareConvolution2dTestImpl(), CompareDepthwiseConvolution2dTestImpl(), CompareMultiplicationTest(), ConcatDifferentInputOutputQParamTest(), Concatenate(), ConcatTest(), ConcatUint16Test(), ConcatUint8DifferentQParamsTest(), ConcatUint8Test(), ConstantLinearActivationTestCommon(), ConvertBf16ToFp32Test(), ConvertFp32ToBf16Test(), Convolution1dTestImpl(), Convolution2dPerAxisQuantTest(), OutputHandler::CreateTensorHandles(), DepthwiseConvolution2dAsymmetricTestImpl(), DepthwiseConvolution2dDepthMul1TestImpl(), DepthwiseConvolution2dPerAxisQuantTest(), DepthwiseConvolution2dTestImpl(), ElementwiseTestHelper(), ElementwiseUnaryTestHelper(), FakeQuantizationTest(), L2Normalization2dShapeTest(), Pad2dTestCommon(), Pad3dTestCommon(), Pad4dTestCommon(), PermuteTensorData(), PreluTest(), SimpleActivationTest(), SimpleConvertFp16ToFp32Test(), SimpleConvertFp32ToFp16Test(), SimpleConvolution2dNhwcTestImpl(), SimpleConvolution2dTestImpl(), SimpleFloorTest(), SimpleFullyConnectedTestImpl(), SimplePermuteTestImpl(), SimpleTransposeTestImpl(), SqrtNNTest(), TransposeConvolution2dPerAxisQuantTest(), and IWorkloadFactory::~IWorkloadFactory().
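
Tensor handles own or reference the backing memory a workload reads and writes. A minimal sketch follows, assuming unmanaged handles (IsMemoryManaged = false) so Allocate() can be called directly rather than going through a backend memory manager; headers are omitted and the helper name is illustrative.

// Illustrative only: allocate input/output storage and attach it to a queue descriptor.
std::vector<std::unique_ptr<armnn::ITensorHandle>>
AttachHandles(armnn::IWorkloadFactory& factory,
              armnn::QueueDescriptor& descriptor,
              const armnn::TensorInfo& inputInfo,
              const armnn::TensorInfo& outputInfo)
{
    std::vector<std::unique_ptr<armnn::ITensorHandle>> handles;
    handles.push_back(factory.CreateTensorHandle(inputInfo, false));   // IsMemoryManaged = false
    handles.push_back(factory.CreateTensorHandle(outputInfo, false));

    for (auto& handle : handles)
    {
        handle->Allocate();                                 // backing memory becomes usable
    }

    descriptor.m_Inputs.push_back(handles[0].get());        // QueueDescriptor keeps non-owning pointers
    descriptor.m_Outputs.push_back(handles[1].get());
    return handles;                                         // caller keeps ownership alive during Execute()
}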

◆ CreateTensorHandle() [2/2]

virtual std::unique_ptr<ITensorHandle> CreateTensorHandle ( const TensorInfo & tensorInfo,
DataLayout  dataLayout,
const bool  IsMemoryManaged = true 
) const
pure virtual

◆ CreateTranspose()

std::unique_ptr< IWorkload > CreateTranspose ( const TransposeQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

Definition at line 1574 of file WorkloadFactory.cpp.

Referenced by TransposeLayer::CreateWorkload(), SimpleTransposeTestImpl(), and IWorkloadFactory::~IWorkloadFactory().

1576 {
1577  return std::unique_ptr<IWorkload>();
1578 }

◆ CreateTransposeConvolution2d()

std::unique_ptr< IWorkload > CreateTransposeConvolution2d ( const TransposeConvolution2dQueueDescriptor & descriptor,
const WorkloadInfo & info 
) const
virtual

◆ GetBackendId()

virtual const BackendId& GetBackendId ( ) const
pure virtual

◆ IsLayerSupported() [1/2]

bool IsLayerSupported ( const BackendId & backendId,
const IConnectableLayer & layer,
Optional< DataType > dataType,
std::string &  outReasonIfUnsupported 
)
static

Definition at line 45 of file WorkloadFactory.cpp.

References armnn::Activation, armnn::Addition, anchors(), armnn::ArgMinMax, ARMNN_ASSERT, ARMNN_ASSERT_MSG, armnn::BackendRegistryInstance(), armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::BFloat16, armnn::Boolean, boxEncodings(), armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertBf16ToFp32, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToBf16, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Float16, armnn::Float32, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GetBiasTypeFromWeightsType(), InputSlot::GetConnection(), Layer::GetInputSlot(), Layer::GetInputSlots(), IConnectableLayer::GetName(), Layer::GetOutputSlot(), Layer::GetOutputSlots(), IOutputSlot::GetTensorInfo(), OutputSlot::GetTensorInfo(), Layer::GetType(), armnn::info, armnn::Input, armnn::InstanceNormalization, armnn::L2Normalization, armnn::LogSoftmax, armnn::Lstm, FullyConnectedDescriptor::m_BiasEnabled, LstmInputParamsInfo::m_CellBias, QuantizedLstmInputParamsInfo::m_CellBias, LstmInputParamsInfo::m_CellLayerNormWeights, LstmInputParamsInfo::m_CellToForgetWeights, LstmInputParamsInfo::m_CellToInputWeights, LstmInputParamsInfo::m_CellToOutputWeights, LstmInputParamsInfo::m_ForgetGateBias, QuantizedLstmInputParamsInfo::m_ForgetGateBias, LstmInputParamsInfo::m_ForgetLayerNormWeights, LstmInputParamsInfo::m_InputGateBias, QuantizedLstmInputParamsInfo::m_InputGateBias, LstmInputParamsInfo::m_InputLayerNormWeights, LstmInputParamsInfo::m_InputToCellWeights, QuantizedLstmInputParamsInfo::m_InputToCellWeights, LstmInputParamsInfo::m_InputToForgetWeights, QuantizedLstmInputParamsInfo::m_InputToForgetWeights, LstmInputParamsInfo::m_InputToInputWeights, QuantizedLstmInputParamsInfo::m_InputToInputWeights, LstmInputParamsInfo::m_InputToOutputWeights, QuantizedLstmInputParamsInfo::m_InputToOutputWeights, LstmInputParamsInfo::m_OutputGateBias, QuantizedLstmInputParamsInfo::m_OutputGateBias, LstmInputParamsInfo::m_OutputLayerNormWeights, LstmInputParamsInfo::m_ProjectionBias, LstmInputParamsInfo::m_ProjectionWeights, LstmInputParamsInfo::m_RecurrentToCellWeights, QuantizedLstmInputParamsInfo::m_RecurrentToCellWeights, LstmInputParamsInfo::m_RecurrentToForgetWeights, QuantizedLstmInputParamsInfo::m_RecurrentToForgetWeights, LstmInputParamsInfo::m_RecurrentToInputWeights, QuantizedLstmInputParamsInfo::m_RecurrentToInputWeights, LstmInputParamsInfo::m_RecurrentToOutputWeights, QuantizedLstmInputParamsInfo::m_RecurrentToOutputWeights, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Merge, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::PreCompiled, armnn::Prelu, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QLstm, armnn::QSymmS16, armnn::QSymmS8, armnn::Quantize, armnn::QuantizedLstm, armnn::Reshape, armnn::Resize, scores(), armnn::Signed32, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StandIn, armnn::StridedSlice, armnn::Subtraction, armnn::Switch, armnn::Transpose, armnn::TransposeConvolution2d, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by armnn::AttemptBackendAssignment(), LoadedNetwork::GetOutputTensorInfo(), ClWorkloadFactory::IsLayerSupported(), NeonWorkloadFactory::IsLayerSupported(), SampleDynamicWorkloadFactory::IsLayerSupported(), IWorkloadFactory::IsLayerSupported(), RefWorkloadFactory::IsLayerSupported(), and IWorkloadFactory::~IWorkloadFactory().
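
A typical use is to ask, before assigning a layer to a backend, whether that backend can execute it and to collect the reason if it cannot. Minimal sketch (the CpuRef backend id and Float32 data type are arbitrary illustrative choices; ArmNN headers are omitted):

// Illustrative only: query support for one layer on the reference backend.
#include <iostream>
#include <string>

bool CheckOnCpuRef(const armnn::IConnectableLayer& layer)
{
    std::string reason;
    const bool supported = armnn::IWorkloadFactory::IsLayerSupported(armnn::BackendId("CpuRef"),
                                                                     layer,
                                                                     armnn::DataType::Float32,
                                                                     reason);
    if (!supported)
    {
        std::cerr << layer.GetName() << " is not supported on CpuRef: " << reason << std::endl;
    }
    return supported;
}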

49 {
50  Optional<std::string&> reason = outReasonIfUnsupported;
51  bool result;
52  const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));
53 
54  auto const& backendRegistry = BackendRegistryInstance();
55  if (!backendRegistry.IsBackendRegistered(backendId))
56  {
57  std::stringstream ss;
58  ss << connectableLayer.GetName() << " is not supported on " << backendId
59  << " because this backend is not registered.";
60 
61  outReasonIfUnsupported = ss.str();
62  return false;
63  }
64 
65  auto backendFactory = backendRegistry.GetFactory(backendId);
66  auto backendObject = backendFactory();
67  auto layerSupportObject = backendObject->GetLayerSupport();
68 
69  switch(layer.GetType())
70  {
71  case LayerType::Activation:
72  {
73  auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
74  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
75  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
76  result = layerSupportObject->IsActivationSupported(
77  OverrideDataType(input, dataType),
78  OverrideDataType(output, dataType),
79  cLayer->GetParameters(),
80  reason);
81  break;
82  }
83  case LayerType::Addition:
84  {
85  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
86  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
87  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
88  result = layerSupportObject->IsAdditionSupported(
89  OverrideDataType(input0, dataType),
90  OverrideDataType(input1, dataType),
91  OverrideDataType(output, dataType),
92  reason);
93  break;
94  }
95  case LayerType::ArgMinMax:
96  {
97  auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
98  const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
99 
100  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
101  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
102  result = layerSupportObject->IsArgMinMaxSupported(
103  OverrideDataType(input, dataType),
104  OverrideDataType(output, DataType::Signed32),
105  descriptor,
106  reason);
107  break;
108  }
109  case LayerType::BatchNormalization:
110  {
111  auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
112  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
113  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
114  const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
115  const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
116  const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
117  const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
118  result = layerSupportObject->IsBatchNormalizationSupported(
119  OverrideDataType(input, dataType),
120  OverrideDataType(output, dataType),
121  OverrideDataType(mean, dataType),
122  OverrideDataType(var, dataType),
123  OverrideDataType(beta, dataType),
124  OverrideDataType(gamma, dataType),
125  cLayer->GetParameters(),
126  reason);
127  break;
128  }
129  case LayerType::BatchToSpaceNd:
130  {
131  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
132  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
133  auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);
134 
135  result = layerSupportObject->IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
136  OverrideDataType(output, dataType),
137  cLayer->GetParameters(),
138  reason);
139  break;
140  }
141  case LayerType::Comparison:
142  {
143  auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);
144 
145  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
146  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
147  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
148 
149  result = layerSupportObject->IsComparisonSupported(OverrideDataType(input0, dataType),
150  OverrideDataType(input1, dataType),
151  OverrideDataType(output, DataType::Boolean),
152  cLayer->GetParameters(),
153  reason);
154  break;
155  }
156  case LayerType::Constant:
157  {
158  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
159  result = layerSupportObject->IsConstantSupported(OverrideDataType(output, dataType), reason);
160  break;
161  }
162  case LayerType::ConvertBf16ToFp32:
163  {
164  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
165  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
166  result = layerSupportObject->IsConvertBf16ToFp32Supported(input, output, reason);
167  break;
168  }
169  case LayerType::ConvertFp16ToFp32:
170  {
171  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
172  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
173  result = layerSupportObject->IsConvertFp16ToFp32Supported(input, output, reason);
174  break;
175  }
176  case LayerType::ConvertFp32ToBf16:
177  {
178  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
179  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
180  result = layerSupportObject->IsConvertFp32ToBf16Supported(input, output, reason);
181  break;
182  }
183  case LayerType::ConvertFp32ToFp16:
184  {
185  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
186  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
187  result = layerSupportObject->IsConvertFp32ToFp16Supported(input, output, reason);
188  break;
189  }
190  case LayerType::Convolution2d:
191  {
192  auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);
193 
194  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
195  dataType);
196  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
197  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
198 
199  const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
200 
201  // Construct optional biases object based on the value of m_BiasEnabled
202  Optional<TensorInfo> biases;
203  if (descriptor.m_BiasEnabled)
204  {
205  biases =
206  OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
207  }
208 
209  result = layerSupportObject->IsConvolution2dSupported(
210  input,
211  output,
212  descriptor,
213  OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
214  biases,
215  reason);
216  break;
217  }
218  case LayerType::Debug:
219  {
220  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
221  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
222 
223  result = layerSupportObject->IsDebugSupported(OverrideDataType(input, dataType),
224  OverrideDataType(output, dataType),
225  reason);
226  break;
227  }
228  case LayerType::DepthToSpace:
229  {
230  auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);
231 
232  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
233  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
234 
235  result = layerSupportObject->IsDepthToSpaceSupported(OverrideDataType(input, dataType),
236  OverrideDataType(output, dataType),
237  cLayer->GetParameters(),
238  reason);
239  break;
240  }
241  case LayerType::DepthwiseConvolution2d:
242  {
243  auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
244  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
245  dataType);
246  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
247  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
248 
249  const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
250 
251  // Construct optional biases object based on the value of m_BiasEnabled
252  Optional<TensorInfo> biases;
253  if (descriptor.m_BiasEnabled)
254  {
255  biases =
256  OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
257  }
258 
259  result = layerSupportObject->IsDepthwiseConvolutionSupported(
260  input,
261  output,
262  descriptor,
263  OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
264  biases,
265  reason);
266  break;
267  }
268  case LayerType::Dequantize:
269  {
270  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
271  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
272 
273  result = layerSupportObject->IsDequantizeSupported(input,
274  OverrideDataType(output, dataType),
275  reason);
276  break;
277  }
278  case LayerType::DetectionPostProcess:
279  {
280  auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
281  const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
282  const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
283  const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();
284 
285  const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
286  const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
287  const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
288  const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();
289 
290  const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
291  result = layerSupportObject->IsDetectionPostProcessSupported(boxEncodings,
292  scores,
293  anchors,
294  detectionBoxes,
295  detectionClasses,
296  detectionScores,
297  numDetections,
298  descriptor,
299  reason);
300  break;
301  }
302  case LayerType::ElementwiseUnary:
303  {
304  auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);
305 
306  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
307  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
308 
309  result = layerSupportObject->IsElementwiseUnarySupported(OverrideDataType(input, dataType),
310  OverrideDataType(output, dataType),
311  cLayer->GetParameters(),
312  reason);
313  break;
314  }
315  case LayerType::FakeQuantization:
316  {
317  auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
318  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
319  result = layerSupportObject->IsFakeQuantizationSupported(OverrideDataType(input, dataType),
320  cLayer->GetParameters(),
321  reason);
322  break;
323  }
324  case LayerType::Floor:
325  {
326  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
327  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
328  result = layerSupportObject->IsFloorSupported(OverrideDataType(input, dataType),
329  OverrideDataType(output, dataType),
330  reason);
331  break;
332  }
333  case LayerType::FullyConnected:
334  {
335  auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
336  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
337  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
338  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
339 
340  TensorInfo biasInfo;
341  const TensorInfo * biasInfoPtr = nullptr;
342  static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
343  static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
344  static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
345  static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
346 
347  const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
348  if (descriptor.m_BiasEnabled)
349  {
350  ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
351  biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
352  biasInfoPtr = &biasInfo;
353  }
354  else
355  {
356  // If biases are not enabled, pass a dummy TensorInfo for the validation
357  switch(input.GetDataType())
358  {
359  case DataType::BFloat16:
360  {
361  biasInfoPtr = &dummyBFloat16Bias;
362  break;
363  }
364  case DataType::Float16:
365  {
366  biasInfoPtr = &dummyFloat16Bias;
367  break;
368  }
369  case DataType::Float32:
370  {
371  biasInfoPtr = &dummyFloat32Bias;
372  break;
373  }
374  case DataType::QAsymmU8:
375  case DataType::QAsymmS8:
376  case DataType::QSymmS8:
377  case DataType::QSymmS16:
378  {
379  biasInfoPtr = &dummyQA8Bias;
380  break;
381  }
382  default:
383  {
384  ARMNN_ASSERT_MSG(false, "Unexpected bias type");
385  }
386  }
387  }
388 
389  result = layerSupportObject->IsFullyConnectedSupported(
390  OverrideDataType(input, dataType),
391  OverrideDataType(output, dataType),
392  OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
393  *biasInfoPtr,
394  descriptor,
395  reason);
396  break;
397  }
398  case LayerType::Gather:
399  {
400  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
401  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
402  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
403  result = layerSupportObject->IsGatherSupported(OverrideDataType(input0, dataType),
404  input1,
405  OverrideDataType(output, dataType),
406  reason);
407  break;
408  }
409  case LayerType::Input:
410  {
411  const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
412  result = layerSupportObject->IsInputSupported(OverrideDataType(input, dataType), reason);
413  break;
414  }
415  case LayerType::InstanceNormalization:
416  {
417  auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
418  const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();
419 
420  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
421  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
422 
423  result = layerSupportObject->IsInstanceNormalizationSupported(
424  OverrideDataType(input, dataType),
425  OverrideDataType(output, dataType),
426  descriptor,
427  reason);
428  break;
429  }
430  case LayerType::L2Normalization:
431  {
432  auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
433  const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
434 
435  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
436  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
437 
438  result = layerSupportObject->IsL2NormalizationSupported(
439  OverrideDataType(input, dataType),
440  OverrideDataType(output, dataType),
441  descriptor,
442  reason);
443  break;
444  }
445  case LayerType::LogSoftmax:
446  {
447  auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);
448 
449  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
450  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
451 
452  result = layerSupportObject->IsLogSoftmaxSupported(OverrideDataType(input, dataType),
453  OverrideDataType(output, dataType),
454  cLayer->GetParameters(),
455  reason);
456  break;
457  }
458  case LayerType::Lstm:
459  {
460  auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
461  const LstmDescriptor& descriptor = cLayer->GetParameters();
462 
463  // All inputs.
464  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
465  dataType);
466  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
467  dataType);
468  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
469  dataType);
470  // All outputs
471  const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
472  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
473  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
474  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);
475 
476  // Basic parameters
477  const TensorInfo& inputToForgetWeights
478  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
479  const TensorInfo& inputToCellWeights
480  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
481  const TensorInfo& inputToOutputWeights
482  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
483  const TensorInfo& recurrentToForgetWeights
484  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
485  const TensorInfo& recurrentToCellWeights
486  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
487  const TensorInfo& recurrentToOutputWeights
488  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
489  const TensorInfo& forgetGateBias
490  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
491  const TensorInfo& cellBias
492  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
493  const TensorInfo& outputGateBias
494  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
495 
496  LstmInputParamsInfo paramsInfo;
497 
498  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
499  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
500  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
501  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
502  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
503  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
504  paramsInfo.m_ForgetGateBias = &forgetGateBias;
505  paramsInfo.m_CellBias = &cellBias;
506  paramsInfo.m_OutputGateBias = &outputGateBias;
507 
508 
509  // Optional parameters
510  TensorInfo optInputToInputWeights;
511  TensorInfo optRecurrentToInputWeights;
512  TensorInfo optCellToInputWeights;
513  TensorInfo optInputGateBias;
514  TensorInfo optProjectionWeights;
515  TensorInfo optProjectionBias;
516  TensorInfo optCellToForgetWeights;
517  TensorInfo optCellToOutputWeights;
518  TensorInfo optInputLayerNormWeights;
519  TensorInfo optForgetLayerNormWeights;
520  TensorInfo optCellLayerNormWeights;
521  TensorInfo optOutputLayerNormWeights;
522 
523  if(!descriptor.m_CifgEnabled)
524  {
525  optInputToInputWeights =
526  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
527  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
528 
529  optRecurrentToInputWeights =
530  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
531  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
532  optInputGateBias =
533  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
534  paramsInfo.m_InputGateBias = &optInputGateBias;
535  }
536 
537  if(descriptor.m_ProjectionEnabled)
538  {
539  optProjectionWeights =
540  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
541  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
542  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
543  {
544  optProjectionBias =
545  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
546  paramsInfo.m_ProjectionBias = &optProjectionBias;
547  }
548  }
549 
550  if(descriptor.m_PeepholeEnabled)
551  {
552  if(!descriptor.m_CifgEnabled)
553  {
554  optCellToInputWeights =
555  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
556  dataType);
557  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
558  }
559  optCellToForgetWeights =
560  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
561  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
562  optCellToOutputWeights =
563  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
564  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
565  }
566 
567  if(descriptor.m_LayerNormEnabled)
568  {
569  if (!descriptor.m_CifgEnabled)
570  {
571  optInputLayerNormWeights = OverrideDataType(
572  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
573  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
574  }
575 
576  optForgetLayerNormWeights = OverrideDataType(
577  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
578  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
579 
580  optCellLayerNormWeights = OverrideDataType(
581  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
582  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
583 
584  optOutputLayerNormWeights = OverrideDataType(
585  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
586  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
587  }
588 
589  result = layerSupportObject->IsLstmSupported(
590  input,
591  outputStateIn,
592  cellStateIn,
593  scratchBuffer,
594  outputStateOut,
595  cellStateOut,
596  output,
597  descriptor,
598  paramsInfo,
599  reason);
600  break;
601  }
602  case LayerType::Maximum:
603  {
604  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
605  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
606  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
607 
608  result = layerSupportObject->IsMaximumSupported(OverrideDataType(input0, dataType),
609  OverrideDataType(input1, dataType),
610  OverrideDataType(output, dataType),
611  reason);
612  break;
613  }
614  case LayerType::MemCopy:
615  {
616  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
617  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
618 
619  result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
620  OverrideDataType(output, dataType),
621  reason);
622  break;
623  }
624  case LayerType::MemImport:
625  {
626  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
627  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
628 
629  result = layerSupportObject->IsMemImportSupported(OverrideDataType(input, dataType),
630  OverrideDataType(output, dataType),
631  reason);
632  break;
633  }
634  case LayerType::Merge:
635  {
636  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
637  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
638  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
639 
640  result = layerSupportObject->IsMergeSupported(OverrideDataType(input0, dataType),
641  OverrideDataType(input1, dataType),
642  OverrideDataType(output, dataType),
643  reason);
644  break;
645  }
646  case LayerType::Concat:
647  {
648  auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);
649 
650  // Get vector of all inputs.
651  auto getTensorInfo = [&dataType](const InputSlot& slot)
652  {
653  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
654  };
655  auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
656  auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
657  std::vector<TensorInfo> inputs(beginI, endI);
658 
659  auto getTensorInfoPtr = [](const TensorInfo& info)
660  {
661  return &info;
662  };
663  auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
664  auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
665  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
666 
667  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
668 
669  result = layerSupportObject->IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
670 
671 
672  break;
673  }
674  case LayerType::Multiplication:
675  {
676  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
677  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
678  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
679  result = layerSupportObject->IsMultiplicationSupported(
680  OverrideDataType(input0, dataType),
681  OverrideDataType(input1, dataType),
682  OverrideDataType(output, dataType),
683  reason);
684  break;
685  }
686  case LayerType::Normalization:
687  {
688  auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
689  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
690  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
691  result = layerSupportObject->IsNormalizationSupported(OverrideDataType(input, dataType),
692  OverrideDataType(output, dataType),
693  cLayer->GetParameters(),
694  reason);
695  break;
696  }
697  case LayerType::Output:
698  {
699  const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
700  result = layerSupportObject->IsOutputSupported(OverrideDataType(output, dataType), reason);
701  break;
702  }
703  case LayerType::Permute:
704  {
705  auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
706  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
707  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
708  result = layerSupportObject->IsPermuteSupported(OverrideDataType(input, dataType),
709  OverrideDataType(output, dataType),
710  cLayer->GetParameters(),
711  reason);
712  break;
713  }
714  case LayerType::Pad:
715  {
716  auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
717  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
718  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
719  result = layerSupportObject->IsPadSupported(
720  OverrideDataType(input, dataType),
721  OverrideDataType(output, dataType),
722  cLayer->GetParameters(),
723  reason);
724  break;
725  }
726  case LayerType::Pooling2d:
727  {
728  auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
729  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
730  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
731  result = layerSupportObject->IsPooling2dSupported(OverrideDataType(input, dataType),
732  OverrideDataType(output, dataType),
733  cLayer->GetParameters(),
734  reason);
735  break;
736  }
737  case LayerType::PreCompiled:
738  {
739  auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
740  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
741  result = layerSupportObject->IsPreCompiledSupported(OverrideDataType(input, dataType),
742  cLayer->GetParameters(),
743  reason);
744  break;
745  }
746  case LayerType::Quantize:
747  {
748  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
749  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
750  result = layerSupportObject->IsQuantizeSupported(input, output, reason);
751  break;
752  }
753  case LayerType::QLstm:
754  {
755  auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
756  const QLstmDescriptor& descriptor = cLayer->GetParameters();
757 
758  // Inputs
759  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
760  const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
761  const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
762 
763  // Outputs
764  const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
765  const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
766  const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();
767 
768  // Lstm parameters
769  LstmInputParamsInfo paramsInfo;
770 
771  // Basic parameters
772  paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
773  paramsInfo.m_InputToCellWeights = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
774  paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();
775 
776  paramsInfo.m_RecurrentToForgetWeights =
777  &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
778  paramsInfo.m_RecurrentToCellWeights =
779  &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
780  paramsInfo.m_RecurrentToOutputWeights =
781  &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();
782 
783  paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
784  paramsInfo.m_CellBias = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
785  paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();
786 
787  if(!descriptor.m_CifgEnabled)
788  {
789  paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
790  paramsInfo.m_RecurrentToInputWeights =
791  &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
792  paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
793  }
794 
795  if(descriptor.m_ProjectionEnabled)
796  {
797  paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();
798 
799  // Projection bias is optional even if projection is enabled
800  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
801  {
802  paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
803  }
804  }
805 
806  if(descriptor.m_PeepholeEnabled)
807  {
808  if (!descriptor.m_CifgEnabled)
809  {
810  paramsInfo.m_CellToInputWeights =
811  &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
812  }
813 
814  paramsInfo.m_CellToForgetWeights =
815  &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
816  paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
817  }
818 
819  if(descriptor.m_LayerNormEnabled)
820  {
821  if (!descriptor.m_CifgEnabled)
822  {
823  paramsInfo.m_InputLayerNormWeights =
824  &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
825  }
826 
827  paramsInfo.m_ForgetLayerNormWeights =
828  &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
829  paramsInfo.m_CellLayerNormWeights =
830  &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
831  paramsInfo.m_OutputLayerNormWeights =
832  &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
833  }
834 
835  result = layerSupportObject->IsQLstmSupported(input,
836  previousOutputIn,
837  previousCellStateIn,
838  outputStateOut,
839  cellStateOut,
840  output,
841  descriptor,
842  paramsInfo,
843  reason);
844  break;
845  }
846  case LayerType::QuantizedLstm:
847  {
848  auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);
849 
850  // Inputs
851  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
852  const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
853  const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
854 
855  // Outputs
856  const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
857  const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
858 
859  // QuantizedLstm parameters
860  QuantizedLstmInputParamsInfo paramsInfo;
861 
862  paramsInfo.m_InputToInputWeights =
863  &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
864  paramsInfo.m_InputToForgetWeights =
865  &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
866  paramsInfo.m_InputToCellWeights =
867  &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
868  paramsInfo.m_InputToOutputWeights =
869  &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
870 
871  paramsInfo.m_RecurrentToInputWeights =
872  &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
873  paramsInfo.m_RecurrentToForgetWeights =
874  &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
875  paramsInfo.m_RecurrentToCellWeights =
876  &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
877  paramsInfo.m_RecurrentToOutputWeights =
878  &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
879 
880  paramsInfo.m_InputGateBias =
881  &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
882  paramsInfo.m_ForgetGateBias =
883  &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
884  paramsInfo.m_CellBias =
885  &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
886  paramsInfo.m_OutputGateBias =
887  &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();
888 
889  result = layerSupportObject->IsQuantizedLstmSupported(input,
890  previousCellStateIn,
891  previousOutputIn,
892  cellStateOut,
893  output,
894  paramsInfo,
895  reason);
896  break;
897  }
898  case LayerType::Division:
899  {
900  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
901  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
902  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
903  result = layerSupportObject->IsDivisionSupported(
904  OverrideDataType(input0, dataType),
905  OverrideDataType(input1, dataType),
906  OverrideDataType(output, dataType),
907  reason);
908  break;
909  }
910  case LayerType::Reshape:
911  {
912  auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
913  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
914  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
915  result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType),
916  OverrideDataType(output, dataType),
917  cLayer->GetParameters(),
918  reason);
919  break;
920  }
921  case LayerType::Resize:
922  {
923  auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
924  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
925  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
926  result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType),
927  OverrideDataType(output, dataType),
928  cLayer->GetParameters(),
929  reason);
930  break;
931  }
932  case LayerType::Slice:
933  {
934  auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);
935 
936  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
937  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
938 
939  result = layerSupportObject->IsSliceSupported(OverrideDataType(input, dataType),
940  OverrideDataType(output, dataType),
941  cLayer->GetParameters(),
942  reason);
943  break;
944  }
945  case LayerType::Softmax:
946  {
947  auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
948  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
949  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
950  result = layerSupportObject->IsSoftmaxSupported(OverrideDataType(input, dataType),
951  OverrideDataType(output, dataType),
952  cLayer->GetParameters(),
953  reason);
954  break;
955  }
956  case LayerType::SpaceToBatchNd:
957  {
958  auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
959  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
960  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
961  result = layerSupportObject->IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
962  OverrideDataType(output, dataType),
963  cLayer->GetParameters(),
964  reason);
965  break;
966  }
967  case LayerType::SpaceToDepth:
968  {
969  auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);
970 
971  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
972  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
973 
974  result = layerSupportObject->IsSpaceToDepthSupported(OverrideDataType(input, dataType),
975  OverrideDataType(output, dataType),
976  cLayer->GetParameters(),
977  reason);
978  break;
979  }
980  case LayerType::Splitter:
981  {
982  auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
983  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
984 
985  // Get vector of all outputs.
986  auto getTensorInfo = [&dataType](const OutputSlot& slot)
987  {
988  return OverrideDataType(slot.GetTensorInfo(), dataType);
989  };
990  auto beginI = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfo);
991  auto endI = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfo);
992  std::vector<TensorInfo> outputs(beginI, endI);
993 
994  const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
995 
996  result = layerSupportObject->IsSplitterSupported(OverrideDataType(input, dataType),
997  outputPtrs,
998  cLayer->GetParameters(),
999  reason);
1000  break;
1001  }
1002  case LayerType::Stack:
1003  {
1004  auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);
1005 
1006  // Get vector of all inputs.
1007  auto getTensorInfo = [&dataType](const InputSlot& slot)
1008  {
1009  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1010  };
1011  auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
1012  auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
1013  std::vector<TensorInfo> inputs(beginI, endI);
1014 
1015  auto getTensorInfoPtr = [](const TensorInfo& info)
1016  {
1017  return &info;
1018  };
1019  auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
1020  auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
1021  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
1022 
1023  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1024 
1025  result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
1026 
1027  break;
1028  }
1029  case LayerType::StandIn:
1030  {
1031  auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);
1032 
1033  // Get vector of all inputs.
1034  auto getTensorInfoIn = [&dataType](const InputSlot& slot)
1035  {
1036  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
1037  };
1038  auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
1039  {
1040  return OverrideDataType(slot.GetTensorInfo(), dataType);
1041  };
1042  auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfoIn);
1043  auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfoIn);
1044  std::vector<TensorInfo> inputs(beginI, endI);
1045 
1046  auto beginO = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
1047  auto endO = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfoOut);
1048  std::vector<TensorInfo> outputs(beginO, endO);
1049 
1050 
1051  auto getTensorInfoPtr = [](const TensorInfo& info)
1052  {
1053  return &info;
1054  };
1055  auto beginPtrI = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
1056  auto endPtrI = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
1057  std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
1058 
1059  auto beginPtrO = boost::make_transform_iterator(outputs.begin(), getTensorInfoPtr);
1060  auto endPtrO = boost::make_transform_iterator(outputs.end(), getTensorInfoPtr);
1061  std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
1062 
1063 
1064  result = layerSupportObject->IsStandInSupported(inputPtrs,
1065  outputPtrs,
1066  cLayer->GetParameters(),
1067  reason);
1068  break;
1069  }
1070  case LayerType::StridedSlice:
1071  {
1072  auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
1073  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1074  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1075  result = layerSupportObject->IsStridedSliceSupported(OverrideDataType(input, dataType),
1076  OverrideDataType(output, dataType),
1077  cLayer->GetParameters(),
1078  reason);
1079  break;
1080  }
1081  case LayerType::Subtraction:
1082  {
1083  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1084  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1085  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1086  result = layerSupportObject->IsSubtractionSupported(
1087  OverrideDataType(input0, dataType),
1088  OverrideDataType(input1, dataType),
1089  OverrideDataType(output, dataType),
1090  reason);
1091  break;
1092  }
1093  case LayerType::Switch:
1094  {
1095  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1096  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1097  const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
1098  const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
1099  result = layerSupportObject->IsSwitchSupported(OverrideDataType(input0, dataType),
1100  OverrideDataType(input1, dataType),
1101  OverrideDataType(output0, dataType),
1102  OverrideDataType(output1, dataType),
1103  reason);
1104  break;
1105  }
1106  case LayerType::Mean:
1107  {
1108  auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
1109  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1110  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1111  result = layerSupportObject->IsMeanSupported(
1112  OverrideDataType(input, dataType),
1113  OverrideDataType(output, dataType),
1114  cLayer->GetParameters(),
1115  reason);
1116  break;
1117  }
1118  case LayerType::Minimum:
1119  {
1120  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1121  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1122  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1123  result = layerSupportObject->IsMinimumSupported(OverrideDataType(input0, dataType),
1124  OverrideDataType(input1, dataType),
1125  OverrideDataType(output, dataType),
1126  reason);
1127  break;
1128  }
1129  case LayerType::Prelu:
1130  {
1131  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1132  const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1133  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1134  result = layerSupportObject->IsPreluSupported(OverrideDataType(input, dataType),
1135  OverrideDataType(alpha, dataType),
1136  OverrideDataType(output, dataType),
1137  reason);
1138  break;
1139  }
1140  case LayerType::Transpose:
1141  {
1142  auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
1143  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1144  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1145  result = layerSupportObject->IsTransposeSupported(OverrideDataType(input, dataType),
1146  OverrideDataType(output, dataType),
1147  cLayer->GetParameters(),
1148  reason);
1149  break;
1150  }
1151  case LayerType::TransposeConvolution2d:
1152  {
1153  auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);
1154 
1155  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1156  dataType);
1157  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1158 
1159  const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();
1160 
1161  Optional<TensorInfo> biases;
1162  if (descriptor.m_BiasEnabled)
1163  {
1164  ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
1165  biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
1166  GetBiasTypeFromWeightsType(dataType));
1167  }
1168 
1169  ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
1170  const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
1171 
1172  result = layerSupportObject->IsTransposeConvolution2dSupported(input,
1173  output,
1174  descriptor,
1175  weights,
1176  biases,
1177  reason);
1178 
1179  break;
1180  }
1181  default:
1182  {
1183  ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
1184  reason.value() = "Unrecognised layer type";
1185  result = false;
1186  break;
1187  }
1188  }
1189  return result;
1190 }
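Note: the switch above only collects the relevant TensorInfo objects and descriptors and forwards them to the backend's layer-support object. As an illustration only (not part of the generated ArmNN documentation), the sketch below shows what one such backend-side check might look like; the in-tree include path and the exact 20.05 ILayerSupport signature are assumptions.

// Illustrative sketch: a layer-support object answering one of the per-layer
// questions dispatched by IWorkloadFactory::IsLayerSupported.
// Assumption: backendsCommon/LayerSupportBase.hpp is reachable on the include path.
#include <string>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/Optional.hpp>
#include <backendsCommon/LayerSupportBase.hpp>

class ExampleLayerSupport : public armnn::LayerSupportBase
{
public:
    bool IsFloorSupported(const armnn::TensorInfo& input,
                          const armnn::TensorInfo& output,
                          armnn::Optional<std::string&> reasonIfUnsupported
                              = armnn::EmptyOptional()) const override
    {
        // Accept only same-shape Float32 tensors; report a reason otherwise.
        if (input.GetDataType() != armnn::DataType::Float32 ||
            input.GetShape() != output.GetShape())
        {
            if (reasonIfUnsupported.has_value())
            {
                reasonIfUnsupported.value() = "Floor: only same-shape Float32 tensors are handled";
            }
            return false;
        }
        return true;
    }
};

A Floor layer reaching the dispatcher would have its input and output TensorInfo routed into exactly such a method (see the LayerType::Floor case at line 324 above).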

◆ IsLayerSupported() [2/2]

bool IsLayerSupported ( const IConnectableLayer &  layer,
Optional< DataType >  dataType,
std::string &  outReasonIfUnsupported 
)
static

Definition at line 1192 of file WorkloadFactory.cpp.

References IWorkloadFactory::IsLayerSupported().

1192 bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
1193  Optional<DataType> dataType,
1194  std::string& outReasonIfUnsupported)
1195 {
1196  auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
1197  return IsLayerSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1198 }
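For illustration only (not part of the generated documentation), a minimal usage sketch of the related overload that takes an explicit BackendId; the include path for WorkloadFactory.hpp and the availability of the reference backend ("CpuRef") are assumptions.

// Illustrative sketch: query support for a single Floor layer on the reference backend.
// Assumptions: backendsCommon/WorkloadFactory.hpp is on the include path and CpuRef is registered.
#include <iostream>
#include <string>
#include <armnn/ArmNN.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

int main()
{
    using namespace armnn;

    // Build a trivial graph: Input -> Floor -> Output.
    INetworkPtr network = INetwork::Create();
    IConnectableLayer* input      = network->AddInputLayer(0);
    IConnectableLayer* floorLayer = network->AddFloorLayer("floor");
    IConnectableLayer* output     = network->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(floorLayer->GetInputSlot(0));
    floorLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    const TensorInfo info({ 1, 4 }, DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);
    floorLayer->GetOutputSlot(0).SetTensorInfo(info);

    // Dispatches to the LayerType::Floor case shown in IsLayerSupported() [1/2] above.
    std::string reason;
    bool supported = IWorkloadFactory::IsLayerSupported(BackendId("CpuRef"), *floorLayer,
                                                        DataType::Float32, reason);
    std::cout << "Floor on CpuRef supported: " << (supported ? "yes" : "no") << std::endl;
    if (!supported)
    {
        std::cout << "Reason: " << reason << std::endl;
    }
    return 0;
}

The [2/2] overload documented here performs the same check but reads the BackendId already assigned to the layer, so it is typically called after backend assignment during optimisation.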

◆ SupportsSubTensors()


The documentation for this class was generated from the following files: