From 6940dd720ebb6b3d1df8ca203ab696daefe58189 Mon Sep 17 00:00:00 2001
From: Jim Flynn
Date: Fri, 20 Mar 2020 12:25:56 +0000
Subject: renamed Documentation folder 20.02 and added .nojekyll file

Signed-off-by: Jim Flynn
---
 20.02/classarmnn_1_1_i_workload_factory.xhtml | 3280 +++++++++++++++++++++++++
 1 file changed, 3280 insertions(+)
 create mode 100644 20.02/classarmnn_1_1_i_workload_factory.xhtml

diff --git a/20.02/classarmnn_1_1_i_workload_factory.xhtml b/20.02/classarmnn_1_1_i_workload_factory.xhtml
new file mode 100644
index 0000000000..766446fb6d
--- /dev/null
+++ b/20.02/classarmnn_1_1_i_workload_factory.xhtml
@@ -0,0 +1,3280 @@
+ArmNN: IWorkloadFactory Class Reference
IWorkloadFactory Class Reference  [abstract]
+
+
+ +

#include <WorkloadFactory.hpp>

+
+Inheritance diagram for IWorkloadFactory:
Derived classes: RefWorkloadFactory, SampleDynamicWorkloadFactory, WorkloadFactoryBase, ClWorkloadFactory, NeonWorkloadFactory

+Public Member Functions

virtual ~IWorkloadFactory ()
virtual const BackendId & GetBackendId () const =0
virtual bool SupportsSubTensors () const =0
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle (ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
virtual std::unique_ptr< IWorkload > CreateInput (const InputQueueDescriptor &descriptor, const WorkloadInfo &info) const =0
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle (const TensorInfo &tensorInfo, DataLayout dataLayout, const bool IsMemoryManaged=true) const =0
virtual std::unique_ptr< IWorkload > CreateAbs (const AbsQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateActivation (const ActivationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateAddition (const AdditionQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateArgMinMax (const ArgMinMaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateBatchNormalization (const BatchNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateBatchToSpaceNd (const BatchToSpaceNdQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateComparison (const ComparisonQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateConcat (const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConstant (const ConstantQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConvertFp16ToFp32 (const ConvertFp16ToFp32QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConvertFp32ToFp16 (const ConvertFp32ToFp16QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateConvolution2d (const Convolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDebug (const DebugQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDepthToSpace (const DepthToSpaceQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d (const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDequantize (const DequantizeQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDetectionPostProcess (const DetectionPostProcessQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateDivision (const DivisionQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateElementwiseUnary (const ElementwiseUnaryQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateEqual (const EqualQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateFakeQuantization (const FakeQuantizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateFloor (const FloorQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateFullyConnected (const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateGather (const GatherQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateGreater (const GreaterQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateInstanceNormalization (const InstanceNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateL2Normalization (const L2NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateLogSoftmax (const LogSoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateLstm (const LstmQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMaximum (const MaximumQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMean (const MeanQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateMemCopy (const MemCopyQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMemImport (const MemImportQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMerge (const MergeQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMerger (const MergerQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMinimum (const MinimumQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateMultiplication (const MultiplicationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateNormalization (const NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateOutput (const OutputQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreatePad (const PadQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreatePermute (const PermuteQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreatePooling2d (const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreatePreCompiled (const PreCompiledQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreatePrelu (const PreluQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateQuantize (const QuantizeQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateQuantizedLstm (const QuantizedLstmQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateReshape (const ReshapeQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateResize (const ResizeQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateResizeBilinear (const ResizeBilinearQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateRsqrt (const RsqrtQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSlice (const SliceQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSoftmax (const SoftmaxQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSpaceToBatchNd (const SpaceToBatchNdQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSpaceToDepth (const SpaceToDepthQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSubtraction (const SubtractionQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateSplitter (const SplitterQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateStack (const StackQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateStridedSlice (const StridedSliceQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateSwitch (const SwitchQueueDescriptor &descriptor, const WorkloadInfo &Info) const
virtual std::unique_ptr< IWorkload > CreateTranspose (const TransposeQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateTransposeConvolution2d (const TransposeConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const

+Static Public Member Functions

static bool IsLayerSupported (const BackendId &backendId, const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
static bool IsLayerSupported (const IConnectableLayer &layer, Optional< DataType > dataType, std::string &outReasonIfUnsupported)
+

Detailed Description

+
+

Definition at line 21 of file WorkloadFactory.hpp.
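IWorkloadFactory is the abstract factory each backend implements to turn a layer's queue descriptor plus its WorkloadInfo into an executable IWorkload, and to create the ITensorHandle objects that back layer inputs and outputs. As a minimal, illustrative sketch (not taken from the ArmNN sources; it assumes an IWorkload::Execute() entry point and already-populated descriptor/info objects), creating and running an activation workload through this interface could look like:

    #include <WorkloadFactory.hpp>   // header named at the top of this page
    #include <memory>

    void RunActivation(armnn::IWorkloadFactory& factory,
                       const armnn::ActivationQueueDescriptor& descriptor,
                       const armnn::WorkloadInfo& info)
    {
        // Virtual dispatch selects the backend override (RefWorkloadFactory,
        // ClWorkloadFactory, NeonWorkloadFactory, ...); the base-class default
        // returns an empty pointer for operations a backend does not implement.
        std::unique_ptr<armnn::IWorkload> workload = factory.CreateActivation(descriptor, info);
        if (workload != nullptr)
        {
            workload->Execute();   // assumed IWorkload entry point
        }
    }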

+

Constructor & Destructor Documentation

+ +

◆ ~IWorkloadFactory()

+ +
+
+ + + + + +
+ + + + + + + +
virtual ~IWorkloadFactory ()
+
+inline virtual
+
+ +

Definition at line 24 of file WorkloadFactory.hpp.

+ +

References ARMNN_DEPRECATED_MSG, IWorkloadFactory::CreateAbs(), IWorkloadFactory::CreateActivation(), IWorkloadFactory::CreateAddition(), IWorkloadFactory::CreateArgMinMax(), IWorkloadFactory::CreateBatchNormalization(), IWorkloadFactory::CreateBatchToSpaceNd(), IWorkloadFactory::CreateComparison(), IWorkloadFactory::CreateConcat(), IWorkloadFactory::CreateConstant(), IWorkloadFactory::CreateConvertFp16ToFp32(), IWorkloadFactory::CreateConvertFp32ToFp16(), IWorkloadFactory::CreateConvolution2d(), IWorkloadFactory::CreateDebug(), IWorkloadFactory::CreateDepthToSpace(), IWorkloadFactory::CreateDepthwiseConvolution2d(), IWorkloadFactory::CreateDequantize(), IWorkloadFactory::CreateDetectionPostProcess(), IWorkloadFactory::CreateDivision(), IWorkloadFactory::CreateElementwiseUnary(), IWorkloadFactory::CreateEqual(), IWorkloadFactory::CreateFakeQuantization(), IWorkloadFactory::CreateFloor(), IWorkloadFactory::CreateFullyConnected(), IWorkloadFactory::CreateGather(), IWorkloadFactory::CreateGreater(), IWorkloadFactory::CreateInput(), IWorkloadFactory::CreateInstanceNormalization(), IWorkloadFactory::CreateL2Normalization(), IWorkloadFactory::CreateLogSoftmax(), IWorkloadFactory::CreateLstm(), IWorkloadFactory::CreateMaximum(), IWorkloadFactory::CreateMean(), IWorkloadFactory::CreateMemCopy(), IWorkloadFactory::CreateMemImport(), IWorkloadFactory::CreateMerge(), IWorkloadFactory::CreateMerger(), IWorkloadFactory::CreateMinimum(), IWorkloadFactory::CreateMultiplication(), IWorkloadFactory::CreateNormalization(), IWorkloadFactory::CreateOutput(), IWorkloadFactory::CreatePad(), IWorkloadFactory::CreatePermute(), IWorkloadFactory::CreatePooling2d(), IWorkloadFactory::CreatePreCompiled(), IWorkloadFactory::CreatePrelu(), IWorkloadFactory::CreateQuantize(), IWorkloadFactory::CreateQuantizedLstm(), IWorkloadFactory::CreateReshape(), IWorkloadFactory::CreateResize(), IWorkloadFactory::CreateResizeBilinear(), IWorkloadFactory::CreateRsqrt(), IWorkloadFactory::CreateSlice(), IWorkloadFactory::CreateSoftmax(), IWorkloadFactory::CreateSpaceToBatchNd(), IWorkloadFactory::CreateSpaceToDepth(), IWorkloadFactory::CreateSplitter(), IWorkloadFactory::CreateStack(), IWorkloadFactory::CreateStridedSlice(), IWorkloadFactory::CreateSubTensorHandle(), IWorkloadFactory::CreateSubtraction(), IWorkloadFactory::CreateSwitch(), IWorkloadFactory::CreateTensorHandle(), IWorkloadFactory::CreateTranspose(), IWorkloadFactory::CreateTransposeConvolution2d(), IWorkloadFactory::GetBackendId(), armnn::Info, armnn::info, IWorkloadFactory::IsLayerSupported(), and IWorkloadFactory::SupportsSubTensors().

+
24 { }
+
+
+

Member Function Documentation

+ +

◆ CreateAbs()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateAbs (const AbsQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, ClWorkloadFactory, NeonWorkloadFactory, and WorkloadFactoryBase.

+ +

Definition at line 1093 of file WorkloadFactory.cpp.

+ +

Referenced by AbsLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1095 {
1096  return std::unique_ptr<IWorkload>();
1097 }
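This base-class default, like the other non-pure Create functions on this page, simply returns an empty std::unique_ptr, so a null check is enough to tell whether a backend has overridden the operation. A small assumed-usage sketch ('descriptor' and 'info' are assumed to be populated elsewhere):

    bool BackendImplementsAbs(const armnn::IWorkloadFactory& factory,
                              const armnn::AbsQueueDescriptor& descriptor,
                              const armnn::WorkloadInfo& info)
    {
        // The default implementation shown above returns a default-constructed
        // unique_ptr, which compares equal to nullptr.
        return factory.CreateAbs(descriptor, info) != nullptr;
    }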
+
+
+ +

◆ CreateActivation()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateActivation (const ActivationQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+
+ +

◆ CreateAddition()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateAddition (const AdditionQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+
+ +

◆ CreateArgMinMax()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateArgMinMax (const ArgMinMaxQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, ClWorkloadFactory, NeonWorkloadFactory, and WorkloadFactoryBase.

+ +

Definition at line 1111 of file WorkloadFactory.cpp.

+ +

Referenced by ArgMinMaxLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1113 {
1114  return std::unique_ptr<IWorkload>();
1115 }
+
+
+ +

◆ CreateBatchNormalization()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateBatchNormalization (const BatchNormalizationQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, ClWorkloadFactory, NeonWorkloadFactory, and WorkloadFactoryBase.

+ +

Definition at line 1117 of file WorkloadFactory.cpp.

+ +

Referenced by CompareBatchNormTest(), BatchNormalizationLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1119 {
1120  return std::unique_ptr<IWorkload>();
1121 }
+
+
+ +

◆ CreateBatchToSpaceNd()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateBatchToSpaceNd (const BatchToSpaceNdQueueDescriptordescriptor,
const WorkloadInfoInfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, ClWorkloadFactory, and NeonWorkloadFactory.

+ +

Definition at line 1123 of file WorkloadFactory.cpp.

+ +

Referenced by BatchToSpaceNdLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1125 {
1126  return std::unique_ptr<IWorkload>();
1127 }
+
+
+ +

◆ CreateComparison()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateComparison (const ComparisonQueueDescriptordescriptor,
const WorkloadInfoInfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, ClWorkloadFactory, and NeonWorkloadFactory.

+ +

Definition at line 1129 of file WorkloadFactory.cpp.

+ +

Referenced by ComparisonLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1131 {
1132  return std::unique_ptr<IWorkload>();
1133 }
+
+
+ +

◆ CreateConcat()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateConcat (const ConcatQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+
+ +

◆ CreateConstant()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateConstant (const ConstantQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, ClWorkloadFactory, and NeonWorkloadFactory.

+ +

Definition at line 1141 of file WorkloadFactory.cpp.

+ +

Referenced by ConstantLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1143 {
1144  return std::unique_ptr<IWorkload>();
1145 }
+
+
+ +

◆ CreateConvertFp16ToFp32()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateConvertFp16ToFp32 (const ConvertFp16ToFp32QueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, ClWorkloadFactory, and NeonWorkloadFactory.

+ +

Definition at line 1147 of file WorkloadFactory.cpp.

+ +

Referenced by ConvertFp16ToFp32Layer::CreateWorkload(), SimpleConvertFp16ToFp32Test(), and IWorkloadFactory::~IWorkloadFactory().

+
1149 {
1150  return std::unique_ptr<IWorkload>();
1151 }
+
+
+ +

◆ CreateConvertFp32ToFp16()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateConvertFp32ToFp16 (const ConvertFp32ToFp16QueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, ClWorkloadFactory, and NeonWorkloadFactory.

+ +

Definition at line 1153 of file WorkloadFactory.cpp.

+ +

Referenced by ConvertFp32ToFp16Layer::CreateWorkload(), SimpleConvertFp32ToFp16Test(), and IWorkloadFactory::~IWorkloadFactory().

+
1155 {
1156  return std::unique_ptr<IWorkload>();
1157 }
+
+
+ +

◆ CreateConvolution2d()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateConvolution2d (const Convolution2dQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+
+ +

◆ CreateDebug()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateDebug (const DebugQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, ClWorkloadFactory, and NeonWorkloadFactory.

+ +

Definition at line 1165 of file WorkloadFactory.cpp.

+ +

Referenced by DebugLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1167 {
1168  return std::unique_ptr<IWorkload>();
1169 }
+
+
+ +

◆ CreateDepthToSpace()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateDepthToSpace (const DepthToSpaceQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, ClWorkloadFactory, and NeonWorkloadFactory.

+ +

Definition at line 1171 of file WorkloadFactory.cpp.

+ +

Referenced by DepthToSpaceLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1173 {
1174  return std::unique_ptr<IWorkload>();
1175 }
+
+
+ +

◆ CreateDepthwiseConvolution2d()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d (const DepthwiseConvolution2dQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+
+ +

◆ CreateDequantize()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateDequantize (const DequantizeQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, ClWorkloadFactory, and NeonWorkloadFactory.

+ +

Definition at line 1183 of file WorkloadFactory.cpp.

+ +

Referenced by DequantizeLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1185 {
1186  return std::unique_ptr<IWorkload>();
1187 }
+
+
+ +

◆ CreateDetectionPostProcess()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateDetectionPostProcess (const DetectionPostProcessQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, ClWorkloadFactory, and NeonWorkloadFactory.

+ +

Definition at line 1189 of file WorkloadFactory.cpp.

+ +

Referenced by DetectionPostProcessLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1191 {
1192  return std::unique_ptr<IWorkload>();
1193 }
+
+
+ +

◆ CreateDivision()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateDivision (const DivisionQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, ClWorkloadFactory, and NeonWorkloadFactory.

+ +

Definition at line 1195 of file WorkloadFactory.cpp.

+ +

Referenced by DivisionLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1197 {
1198  return std::unique_ptr<IWorkload>();
1199 }
+
+
+ +

◆ CreateElementwiseUnary()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateElementwiseUnary (const ElementwiseUnaryQueueDescriptordescriptor,
const WorkloadInfoInfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, ClWorkloadFactory, and NeonWorkloadFactory.

+ +

Definition at line 1201 of file WorkloadFactory.cpp.

+ +

Referenced by ElementwiseUnaryLayer::CreateWorkload(), CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1203 {
1204  return std::unique_ptr<IWorkload>();
1205 }
+
+
+ +

◆ CreateEqual()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateEqual (const EqualQueueDescriptordescriptor,
const WorkloadInfoInfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1207 of file WorkloadFactory.cpp.

+ +

Referenced by IWorkloadFactory::~IWorkloadFactory().

+
1209 {
1210  return std::unique_ptr<IWorkload>();
1211 }
+
+
+ +

◆ CreateFakeQuantization()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateFakeQuantization (const FakeQuantizationQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, and RefWorkloadFactory.

+ +

Definition at line 1213 of file WorkloadFactory.cpp.

+ +

Referenced by FakeQuantizationLayer::CreateWorkload(), FakeQuantizationTest(), and IWorkloadFactory::~IWorkloadFactory().

+
1215 {
1216  return std::unique_ptr<IWorkload>();
1217 }
+
+
+ +

◆ CreateFloor()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateFloor (const FloorQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1219 of file WorkloadFactory.cpp.

+ +

Referenced by FloorLayer::CreateWorkload(), SimpleFloorTest(), and IWorkloadFactory::~IWorkloadFactory().

+
1221 {
1222  return std::unique_ptr<IWorkload>();
1223 }
+
+
+ +

◆ CreateFullyConnected()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateFullyConnected (const FullyConnectedQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1225 of file WorkloadFactory.cpp.

+ +

Referenced by FullyConnectedLayer::CreateWorkload(), SimpleFullyConnectedTestImpl(), and IWorkloadFactory::~IWorkloadFactory().

+
1227 {
1228  return std::unique_ptr<IWorkload>();
1229 }
+
+
+ +

◆ CreateGather()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateGather (const GatherQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1231 of file WorkloadFactory.cpp.

+ +

Referenced by GatherLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1233 {
1234  return std::unique_ptr<IWorkload>();
1235 }
+
+
+ +

◆ CreateGreater()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateGreater (const GreaterQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1237 of file WorkloadFactory.cpp.

+ +

Referenced by IWorkloadFactory::~IWorkloadFactory().

+
1239 {
1240  return std::unique_ptr<IWorkload>();
1241 }
+
+
+ +

◆ CreateInput()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
virtual std::unique_ptr<IWorkload> CreateInput (const InputQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+pure virtual
+
+
+ +

◆ CreateInstanceNormalization()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateInstanceNormalization (const InstanceNormalizationQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1243 of file WorkloadFactory.cpp.

+ +

Referenced by InstanceNormalizationLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1246 {
1247  return std::unique_ptr<IWorkload>();
1248 }
+
+
+ +

◆ CreateL2Normalization()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateL2Normalization (const L2NormalizationQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1250 of file WorkloadFactory.cpp.

+ +

Referenced by L2NormalizationLayer::CreateWorkload(), L2Normalization2dShapeTest(), and IWorkloadFactory::~IWorkloadFactory().

+
1252 {
1253  return std::unique_ptr<IWorkload>();
1254 }
+
+
+ +

◆ CreateLogSoftmax()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateLogSoftmax (const LogSoftmaxQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, and RefWorkloadFactory.

+ +

Definition at line 1256 of file WorkloadFactory.cpp.

+ +

Referenced by LogSoftmaxLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1258 {
1259  return std::unique_ptr<IWorkload>();
1260 }
+
+
+ +

◆ CreateLstm()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateLstm (const LstmQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1262 of file WorkloadFactory.cpp.

+ +

Referenced by LstmLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1264 {
1265  return std::unique_ptr<IWorkload>();
1266 }
+
+
+ +

◆ CreateMaximum()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateMaximum (const MaximumQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1268 of file WorkloadFactory.cpp.

+ +

Referenced by MaximumLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1270 {
1271  return std::unique_ptr<IWorkload>();
1272 }
+
+
+ +

◆ CreateMean()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateMean (const MeanQueueDescriptordescriptor,
const WorkloadInfoInfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1274 of file WorkloadFactory.cpp.

+ +

Referenced by MeanLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1276 {
1277  return std::unique_ptr<IWorkload>();
1278 }
+
+
+ +

◆ CreateMemCopy()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateMemCopy (const MemCopyQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1280 of file WorkloadFactory.cpp.

+ +

Referenced by IWorkloadFactory::~IWorkloadFactory().

+
1282 {
1283  return std::unique_ptr<IWorkload>();
1284 }
+
+
+ +

◆ CreateMemImport()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateMemImport (const MemImportQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1286 of file WorkloadFactory.cpp.

+ +

Referenced by IWorkloadFactory::~IWorkloadFactory().

+
1288 {
1289  return std::unique_ptr<IWorkload>();
1290 }
+
+
+ +

◆ CreateMerge()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateMerge (const MergeQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase.

+ +

Definition at line 1292 of file WorkloadFactory.cpp.

+ +

Referenced by IWorkloadFactory::~IWorkloadFactory().

+
1294 {
1295  return std::unique_ptr<IWorkload>();
1296 }
+
+
+ +

◆ CreateMerger()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateMerger (const MergerQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1298 of file WorkloadFactory.cpp.

+ +

Referenced by IWorkloadFactory::~IWorkloadFactory().

+
1300 {
1301  return std::unique_ptr<IWorkload>();
1302 }
+
+
+ +

◆ CreateMinimum()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateMinimum (const MinimumQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1304 of file WorkloadFactory.cpp.

+ +

Referenced by MinimumLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1306 {
1307  return std::unique_ptr<IWorkload>();
1308 }
+
+
+ +

◆ CreateMultiplication()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateMultiplication (const MultiplicationQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1310 of file WorkloadFactory.cpp.

+ +

Referenced by CompareMultiplicationTest(), MultiplicationLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1312 {
1313  return std::unique_ptr<IWorkload>();
1314 }
+
+
+ +

◆ CreateNormalization()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateNormalization (const NormalizationQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1316 of file WorkloadFactory.cpp.

+ +

Referenced by NormalizationLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1318 {
1319  return std::unique_ptr<IWorkload>();
1320 }
+
+
+ +

◆ CreateOutput()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateOutput (const OutputQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, ClWorkloadFactory, and SampleDynamicWorkloadFactory.

+ +

Definition at line 1322 of file WorkloadFactory.cpp.

+ +

Referenced by IWorkloadFactory::~IWorkloadFactory().

+
1324 {
1325  return std::unique_ptr<IWorkload>();
1326 }
+
+
+ +

◆ CreatePad()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreatePad (const PadQueueDescriptordescriptor,
const WorkloadInfoInfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1328 of file WorkloadFactory.cpp.

+ +

Referenced by PadLayer::CreateWorkload(), Pad2dTestCommon(), Pad3dTestCommon(), Pad4dTestCommon(), and IWorkloadFactory::~IWorkloadFactory().

+
1330 {
1331  return std::unique_ptr<IWorkload>();
1332 }
+
+
+ +

◆ CreatePermute()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreatePermute (const PermuteQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1334 of file WorkloadFactory.cpp.

+ +

Referenced by PermuteLayer::CreateWorkload(), PermuteTensorData(), SimplePermuteTestImpl(), and IWorkloadFactory::~IWorkloadFactory().

+
1336 {
1337  return std::unique_ptr<IWorkload>();
1338 }
+
+
+ +

◆ CreatePooling2d()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreatePooling2d (const Pooling2dQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1340 of file WorkloadFactory.cpp.

+ +

Referenced by AdditionAfterMaxPoolTest(), Pooling2dLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1342 {
1343  return std::unique_ptr<IWorkload>();
1344 }
+
+
+ +

◆ CreatePreCompiled()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreatePreCompiled (const PreCompiledQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1346 of file WorkloadFactory.cpp.

+ +

Referenced by PreCompiledLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1348 {
1349  return std::unique_ptr<IWorkload>();
1350 }
+
+
+ +

◆ CreatePrelu()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreatePrelu (const PreluQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1352 of file WorkloadFactory.cpp.

+ +

Referenced by PreluLayer::CreateWorkload(), PreluTest(), and IWorkloadFactory::~IWorkloadFactory().

+
1354 {
1355  return std::unique_ptr<IWorkload>();
1356 }
+
+
+ +

◆ CreateQuantize()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateQuantize (const QuantizeQueueDescriptordescriptor,
const WorkloadInfoInfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1358 of file WorkloadFactory.cpp.

+ +

Referenced by QuantizeLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1360 {
1361  return std::unique_ptr<IWorkload>();
1362 }
+
+
+ +

◆ CreateQuantizedLstm()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateQuantizedLstm (const QuantizedLstmQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1364 of file WorkloadFactory.cpp.

+ +

Referenced by QuantizedLstmLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1366 {
1367  return std::unique_ptr<IWorkload>();
1368 }
+
+
+ +

◆ CreateReshape()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateReshape (const ReshapeQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1370 of file WorkloadFactory.cpp.

+ +

Referenced by ReshapeLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1372 {
1373  return std::unique_ptr<IWorkload>();
1374 }
+
+
+ +

◆ CreateResize()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateResize (const ResizeQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1382 of file WorkloadFactory.cpp.

+ +

Referenced by ResizeLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1384 {
1385  return std::unique_ptr<IWorkload>();
1386 }
+
+
+ +

◆ CreateResizeBilinear()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateResizeBilinear (const ResizeBilinearQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1376 of file WorkloadFactory.cpp.

+ +

Referenced by IWorkloadFactory::~IWorkloadFactory().

+
1378 {
1379  return std::unique_ptr<IWorkload>();
1380 }
+
+
+ +

◆ CreateRsqrt()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateRsqrt (const RsqrtQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1388 of file WorkloadFactory.cpp.

+ +

Referenced by RsqrtLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1390 {
1391  return std::unique_ptr<IWorkload>();
1392 }
+
+
+ +

◆ CreateSlice()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateSlice (const SliceQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1394 of file WorkloadFactory.cpp.

+ +

Referenced by SliceLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1396 {
1397  return std::unique_ptr<IWorkload>();
1398 }
+
+
+ +

◆ CreateSoftmax()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateSoftmax (const SoftmaxQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1400 of file WorkloadFactory.cpp.

+ +

Referenced by SoftmaxLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1402 {
1403  return std::unique_ptr<IWorkload>();
1404 }
+
+
+ +

◆ CreateSpaceToBatchNd()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateSpaceToBatchNd (const SpaceToBatchNdQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1412 of file WorkloadFactory.cpp.

+ +

Referenced by SpaceToBatchNdLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1414 {
1415  return std::unique_ptr<IWorkload>();
1416 }
+
+
+ +

◆ CreateSpaceToDepth()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateSpaceToDepth (const SpaceToDepthQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1418 of file WorkloadFactory.cpp.

+ +

Referenced by SpaceToDepthLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1420 {
1421  return std::unique_ptr<IWorkload>();
1422 }
+
+
+ +

◆ CreateSplitter()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateSplitter (const SplitterQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1406 of file WorkloadFactory.cpp.

+ +

Referenced by SplitterLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1408 {
1409  return std::unique_ptr<IWorkload>();
1410 }
+
+
+ +

◆ CreateStack()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateStack (const StackQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1424 of file WorkloadFactory.cpp.

+ +

Referenced by StackLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1426 {
1427  return std::unique_ptr<IWorkload>();
1428 }
+
+
+ +

◆ CreateStridedSlice()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateStridedSlice (const StridedSliceQueueDescriptordescriptor,
const WorkloadInfoInfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1430 of file WorkloadFactory.cpp.

+ +

Referenced by StridedSliceLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1432 {
1433  return std::unique_ptr<IWorkload>();
1434 }
+
+
+ +

◆ CreateSubTensorHandle()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + +
virtual std::unique_ptr<ITensorHandle> CreateSubTensorHandle (ITensorHandleparent,
TensorShape const & subTensorShape,
unsigned int const * subTensorOrigin 
) const
+
+pure virtual
+
+
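When SupportsSubTensors() returns true, a sub-tensor handle is a view into an existing parent handle at a given origin, which lets adjacent layers share one allocation instead of copying. A hypothetical sketch (the shapes, names and origin convention are illustrative assumptions, not from the ArmNN sources):

    // Split a {2, 3, 4, 4} parent tensor into two {1, 3, 4, 4} views along
    // the first dimension. 'factory' and 'parentHandle' already exist.
    void MakeHalfViews(const armnn::IWorkloadFactory& factory, armnn::ITensorHandle& parentHandle)
    {
        if (!factory.SupportsSubTensors())
        {
            return;   // backend cannot express views; fall back to copies
        }

        const armnn::TensorShape subShape({1, 3, 4, 4});
        const unsigned int firstOrigin[4]  = {0, 0, 0, 0};
        const unsigned int secondOrigin[4] = {1, 0, 0, 0};

        // Each call returns a handle that aliases part of parentHandle.
        auto firstHalf  = factory.CreateSubTensorHandle(parentHandle, subShape, firstOrigin);
        auto secondHalf = factory.CreateSubTensorHandle(parentHandle, subShape, secondOrigin);
    }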
+ +

◆ CreateSubtraction()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateSubtraction (const SubtractionQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1436 of file WorkloadFactory.cpp.

+ +

Referenced by SubtractionLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1438 {
1439  return std::unique_ptr<IWorkload>();
1440 }
+
+
+ +

◆ CreateSwitch()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateSwitch (const SwitchQueueDescriptordescriptor,
const WorkloadInfoInfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase.

+ +

Definition at line 1442 of file WorkloadFactory.cpp.

+ +

Referenced by SwitchLayer::CreateWorkload(), and IWorkloadFactory::~IWorkloadFactory().

+
1444 {
1445  return std::unique_ptr<IWorkload>();
1446 }
+
+
+ +

◆ CreateTensorHandle() [1/2]

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
virtual std::unique_ptr<ITensorHandle> CreateTensorHandle (const TensorInfotensorInfo,
const bool IsMemoryManaged = true 
) const
+
+pure virtual
+
+
+ +

◆ CreateTensorHandle() [2/2]

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + +
virtual std::unique_ptr<ITensorHandle> CreateTensorHandle (const TensorInfotensorInfo,
DataLayout dataLayout,
const bool IsMemoryManaged = true 
) const
+
+pure virtual
+
+
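Both CreateTensorHandle overloads are how a backend hands out the memory behind a layer's tensors; this overload additionally fixes the dimension ordering. A hedged sketch (the shape and data type are made-up illustration values):

    // Request a Float32, NHWC handle. Passing IsMemoryManaged=true (the default)
    // leaves ownership of the allocation with the backend's memory manager.
    std::unique_ptr<armnn::ITensorHandle> MakeNhwcHandle(const armnn::IWorkloadFactory& factory)
    {
        const armnn::TensorInfo info(armnn::TensorShape({1, 16, 16, 3}), armnn::DataType::Float32);
        return factory.CreateTensorHandle(info, armnn::DataLayout::NHWC, true);
    }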
+ +

◆ CreateTranspose()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateTranspose (const TransposeQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1448 of file WorkloadFactory.cpp.

+ +

Referenced by TransposeLayer::CreateWorkload(), SimpleTransposeTestImpl(), and IWorkloadFactory::~IWorkloadFactory().

+
1450 {
1451  return std::unique_ptr<IWorkload>();
1452 }
+
+
+ +

◆ CreateTransposeConvolution2d()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
std::unique_ptr< IWorkload > CreateTransposeConvolution2d (const TransposeConvolution2dQueueDescriptordescriptor,
const WorkloadInfoinfo 
) const
+
+virtual
+
+ +

Reimplemented in WorkloadFactoryBase, RefWorkloadFactory, NeonWorkloadFactory, and ClWorkloadFactory.

+ +

Definition at line 1454 of file WorkloadFactory.cpp.

+ +

Referenced by TransposeConvolution2dLayer::CreateWorkload(), TransposeConvolution2dPerAxisQuantTest(), and IWorkloadFactory::~IWorkloadFactory().

+
1457 {
1458  return std::unique_ptr<IWorkload>();
1459 }
+
+
+ +

◆ GetBackendId()

+ +
+
+ + + + + +
+ + + + + + + +
virtual const BackendId& GetBackendId () const
+
+pure virtual
+
+
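Every concrete factory identifies itself through this pure virtual. A hypothetical override (the class name and the backend id "SampleBackend" are made up for illustration; the remaining pure virtual members still need overriding before the class can be instantiated):

    class SampleWorkloadFactory : public armnn::IWorkloadFactory
    {
    public:
        const armnn::BackendId& GetBackendId() const override
        {
            // Return a reference to a long-lived id object.
            static const armnn::BackendId id("SampleBackend");
            return id;
        }
        // ... other pure virtual members omitted ...
    };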
+ +

◆ IsLayerSupported() [1/2]

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
bool IsLayerSupported (const BackendIdbackendId,
const IConnectableLayerlayer,
Optional< DataTypedataType,
std::string & outReasonIfUnsupported 
)
+
+static
+
+ +

Definition at line 45 of file WorkloadFactory.cpp.

+ +

References armnn::Activation, armnn::Addition, anchors(), armnn::ArgMinMax, armnn::BackendRegistryInstance(), armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::BFloat16, armnn::Boolean, boxEncodings(), armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Float16, armnn::Float32, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GetBiasTypeFromWeightsType(), InputSlot::GetConnection(), Layer::GetInputSlot(), Layer::GetInputSlots(), IConnectableLayer::GetName(), Layer::GetOutputSlot(), Layer::GetOutputSlots(), IOutputSlot::GetTensorInfo(), OutputSlot::GetTensorInfo(), Layer::GetType(), armnn::info, armnn::Input, armnn::InstanceNormalization, armnn::L2Normalization, armnn::LogSoftmax, armnn::Lstm, FullyConnectedDescriptor::m_BiasEnabled, QuantizedLstmInputParamsInfo::m_CellBias, QuantizedLstmInputParamsInfo::m_ForgetGateBias, QuantizedLstmInputParamsInfo::m_InputGateBias, QuantizedLstmInputParamsInfo::m_InputToCellWeights, LstmInputParamsInfo::m_InputToForgetWeights, QuantizedLstmInputParamsInfo::m_InputToForgetWeights, QuantizedLstmInputParamsInfo::m_InputToInputWeights, QuantizedLstmInputParamsInfo::m_InputToOutputWeights, QuantizedLstmInputParamsInfo::m_OutputGateBias, QuantizedLstmInputParamsInfo::m_RecurrentToCellWeights, QuantizedLstmInputParamsInfo::m_RecurrentToForgetWeights, QuantizedLstmInputParamsInfo::m_RecurrentToInputWeights, QuantizedLstmInputParamsInfo::m_RecurrentToOutputWeights, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Merge, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::PreCompiled, armnn::Prelu, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Quantize, armnn::QuantizedLstm, armnn::Reshape, armnn::Resize, scores(), armnn::Signed32, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StandIn, armnn::StridedSlice, armnn::Subtraction, armnn::Switch, armnn::Transpose, armnn::TransposeConvolution2d, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

+ +

Referenced by armnn::AttemptBackendAssignment(), LoadedNetwork::GetOutputTensorInfo(), ClWorkloadFactory::IsLayerSupported(), NeonWorkloadFactory::IsLayerSupported(), SampleDynamicWorkloadFactory::IsLayerSupported(), IWorkloadFactory::IsLayerSupported(), RefWorkloadFactory::IsLayerSupported(), and IWorkloadFactory::~IWorkloadFactory().
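This static overload is what the optimizer consults when deciding whether a layer can be assigned to a particular backend (it is listed above as referenced from armnn::AttemptBackendAssignment()). A small assumed-usage sketch querying the CPU reference backend directly:

    // Illustrative only: "CpuRef" is the id of the reference backend; an
    // empty Optional<DataType> keeps the layer's own data type.
    bool CanRunOnCpuRef(const armnn::IConnectableLayer& layer)
    {
        std::string reasonIfUnsupported;
        return armnn::IWorkloadFactory::IsLayerSupported(armnn::BackendId("CpuRef"),
                                                         layer,
                                                         armnn::Optional<armnn::DataType>(),
                                                         reasonIfUnsupported);
    }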

+
45 bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
46                                         const IConnectableLayer& connectableLayer,
47                                         Optional<DataType> dataType,
48                                         std::string& outReasonIfUnsupported)
49 {
50  Optional<std::string&> reason = outReasonIfUnsupported;
51  bool result;
52  const Layer& layer = *(boost::polymorphic_downcast<const Layer*>(&connectableLayer));
53 
54  auto const& backendRegistry = BackendRegistryInstance();
55  if (!backendRegistry.IsBackendRegistered(backendId))
56  {
57  std::stringstream ss;
58  ss << connectableLayer.GetName() << " is not supported on " << backendId
59  << " because this backend is not registered.";
60 
61  outReasonIfUnsupported = ss.str();
62  return false;
63  }
64 
65  auto backendFactory = backendRegistry.GetFactory(backendId);
66  auto backendObject = backendFactory();
67  auto layerSupportObject = backendObject->GetLayerSupport();
68 
69  switch(layer.GetType())
70  {
71  case LayerType::Activation:
72  {
73  auto cLayer = boost::polymorphic_downcast<const ActivationLayer*>(&layer);
74  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
75  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
76  result = layerSupportObject->IsActivationSupported(
77  OverrideDataType(input, dataType),
78  OverrideDataType(output, dataType),
79  cLayer->GetParameters(),
80  reason);
81  break;
82  }
83  case LayerType::Addition:
84  {
85  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
86  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
87  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
88  result = layerSupportObject->IsAdditionSupported(
89  OverrideDataType(input0, dataType),
90  OverrideDataType(input1, dataType),
91  OverrideDataType(output, dataType),
92  reason);
93  break;
94  }
95  case LayerType::ArgMinMax:
96  {
97  auto cLayer = boost::polymorphic_downcast<const ArgMinMaxLayer*>(&layer);
98  const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
99 
100  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
101  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
102  result = layerSupportObject->IsArgMinMaxSupported(
103  OverrideDataType(input, dataType),
104  OverrideDataType(output, DataType::Signed32),
105  descriptor,
106  reason);
107  break;
108  }
109  case LayerType::BatchNormalization:
110  {
111  auto cLayer = boost::polymorphic_downcast<const BatchNormalizationLayer*>(&layer);
112  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
113  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
114  const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
115  const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
116  const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
117  const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
118  result = layerSupportObject->IsBatchNormalizationSupported(
119  OverrideDataType(input, dataType),
120  OverrideDataType(output, dataType),
121  OverrideDataType(mean, dataType),
122  OverrideDataType(var, dataType),
123  OverrideDataType(beta, dataType),
124  OverrideDataType(gamma, dataType),
125  cLayer->GetParameters(),
126  reason);
127  break;
128  }
129  case LayerType::BatchToSpaceNd:
130  {
131  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
132  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
133  auto cLayer = boost::polymorphic_downcast<const BatchToSpaceNdLayer*>(&layer);
134 
135  result = layerSupportObject->IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
136  OverrideDataType(output, dataType),
137  cLayer->GetParameters(),
138  reason);
139  break;
140  }
141  case LayerType::Comparison:
142  {
143  auto cLayer = boost::polymorphic_downcast<const ComparisonLayer*>(&layer);
144 
145  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
146  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
147  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
148 
149  result = layerSupportObject->IsComparisonSupported(OverrideDataType(input0, dataType),
150  OverrideDataType(input1, dataType),
151  OverrideDataType(output, DataType::Boolean),
152  cLayer->GetParameters(),
153  reason);
154  break;
155  }
156  case LayerType::Constant:
157  {
158  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
159  result = layerSupportObject->IsConstantSupported(OverrideDataType(output, dataType), reason);
160  break;
161  }
162  case LayerType::ConvertFp16ToFp32:
163  {
164  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
165  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
166  result = layerSupportObject->IsConvertFp16ToFp32Supported(input, output, reason);
167  break;
168  }
169  case LayerType::ConvertFp32ToFp16:
170  {
171  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
172  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
173  result = layerSupportObject->IsConvertFp32ToFp16Supported(input, output, reason);
174  break;
175  }
176  case LayerType::Convolution2d:
177  {
178  auto cLayer = boost::polymorphic_downcast<const Convolution2dLayer*>(&layer);
179 
180  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
181  dataType);
182  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
183  BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
184 
185  const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
186 
187  // Construct optional biases object based on the value of m_BiasEnabled
188  Optional<TensorInfo> biases;
189  if (descriptor.m_BiasEnabled)
190  {
191  biases =
192  OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
193  }
194 
195  result = layerSupportObject->IsConvolution2dSupported(
196  input,
197  output,
198  descriptor,
199  OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
200  biases,
201  reason);
202  break;
203  }
204  case LayerType::Debug:
205  {
206  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
207  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
208 
209  result = layerSupportObject->IsDebugSupported(OverrideDataType(input, dataType),
210  OverrideDataType(output, dataType),
211  reason);
212  break;
213  }
214  case LayerType::DepthToSpace:
215  {
216  auto cLayer = boost::polymorphic_downcast<const DepthToSpaceLayer*>(&layer);
217 
218  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
219  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
220 
221  result = layerSupportObject->IsDepthToSpaceSupported(OverrideDataType(input, dataType),
222  OverrideDataType(output, dataType),
223  cLayer->GetParameters(),
224  reason);
225  break;
226  }
227  case LayerType::DepthwiseConvolution2d:
228  {
229  auto cLayer = boost::polymorphic_downcast<const DepthwiseConvolution2dLayer*>(&layer);
230  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
231  dataType);
232  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
233  BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
234 
235  const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
236 
237  // Construct optional biases object based on the value of m_BiasEnabled
238  Optional<TensorInfo> biases;
239  if (descriptor.m_BiasEnabled)
240  {
241  biases =
242  OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
243  }
244 
245  result = layerSupportObject->IsDepthwiseConvolutionSupported(
246  input,
247  output,
248  descriptor,
249  OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
250  biases,
251  reason);
252  break;
253  }
254  case LayerType::Dequantize:
255  {
256  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
257  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
258 
259  result = layerSupportObject->IsDequantizeSupported(input,
260  OverrideDataType(output, dataType),
261  reason);
262  break;
263  }
264  case LayerType::DetectionPostProcess:
265  {
266  auto cLayer = boost::polymorphic_downcast<const DetectionPostProcessLayer*>(&layer);
267  const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
268  const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
269  const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();
270 
271  const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
272  const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
273  const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
274  const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();
275 
276  const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
277  result = layerSupportObject->IsDetectionPostProcessSupported(boxEncodings,
278  scores,
279  anchors,
280  detectionBoxes,
281  detectionClasses,
282  detectionScores,
283  numDetections,
284  descriptor,
285  reason);
286  break;
287  }
288  case LayerType::ElementwiseUnary:
289  {
290  auto cLayer = boost::polymorphic_downcast<const ElementwiseUnaryLayer*>(&layer);
291 
292  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
293  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
294 
295  result = layerSupportObject->IsElementwiseUnarySupported(OverrideDataType(input, dataType),
296  OverrideDataType(output, dataType),
297  cLayer->GetParameters(),
298  reason);
299  break;
300  }
301  case LayerType::FakeQuantization:
302  {
303  auto cLayer = boost::polymorphic_downcast<const FakeQuantizationLayer*>(&layer);
304  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
305  result = layerSupportObject->IsFakeQuantizationSupported(OverrideDataType(input, dataType),
306  cLayer->GetParameters(),
307  reason);
308  break;
309  }
310  case LayerType::Floor:
311  {
312  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
313  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
314  result = layerSupportObject->IsFloorSupported(OverrideDataType(input, dataType),
315  OverrideDataType(output, dataType),
316  reason);
317  break;
318  }
319  case LayerType::FullyConnected:
320  {
321  auto cLayer = boost::polymorphic_downcast<const FullyConnectedLayer*>(&layer);
322  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
323  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
324  BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
325 
326  TensorInfo biasInfo;
327  const TensorInfo * biasInfoPtr = nullptr;
328  static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
329  static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
330  static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
331  static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
332 
333  const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
334  if (descriptor.m_BiasEnabled)
335  {
336  BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
337  biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
338  biasInfoPtr = &biasInfo;
339  }
340  else
341  {
342  // If biases are not enabled pass a dummy tensorinfo for the validation
343  switch(input.GetDataType())
344  {
345  case DataType::BFloat16:
346  {
347  biasInfoPtr = &dummyBFloat16Bias;
348  break;
349  }
350  case DataType::Float16:
351  {
352  biasInfoPtr = &dummyFloat16Bias;
353  break;
354  }
355  case DataType::Float32:
356  {
357  biasInfoPtr = &dummyFloat32Bias;
358  break;
359  }
360  case DataType::QAsymmU8:
361  case DataType::QAsymmS8:
362  case DataType::QSymmS8:
363  case DataType::QSymmS16:
364  {
365  biasInfoPtr = &dummyQA8Bias;
366  break;
367  }
368  default:
369  {
370  BOOST_ASSERT_MSG(false, "Unexpected bias type");
371  }
372  }
373  }
374 
375  result = layerSupportObject->IsFullyConnectedSupported(
376  OverrideDataType(input, dataType),
377  OverrideDataType(output, dataType),
378  OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
379  *biasInfoPtr,
380  descriptor,
381  reason);
382  break;
383  }
384  case LayerType::Gather:
385  {
386  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
387  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
388  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
389  result = layerSupportObject->IsGatherSupported(OverrideDataType(input0, dataType),
390  input1,
391  OverrideDataType(output, dataType),
392  reason);
393  break;
394  }
395  case LayerType::Input:
396  {
397  const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
398  result = layerSupportObject->IsInputSupported(OverrideDataType(input, dataType), reason);
399  break;
400  }
401  case LayerType::InstanceNormalization:
402  {
403  auto cLayer = boost::polymorphic_downcast<const InstanceNormalizationLayer*>(&layer);
404  const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();
405 
406  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
407  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
408 
409  result = layerSupportObject->IsInstanceNormalizationSupported(
410  OverrideDataType(input, dataType),
411  OverrideDataType(output, dataType),
412  descriptor,
413  reason);
414  break;
415  }
416  case LayerType::L2Normalization:
417  {
418  auto cLayer = boost::polymorphic_downcast<const L2NormalizationLayer*>(&layer);
419  const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
420 
421  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
422  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
423 
424  result = layerSupportObject->IsL2NormalizationSupported(
425  OverrideDataType(input, dataType),
426  OverrideDataType(output, dataType),
427  descriptor,
428  reason);
429  break;
430  }
431  case LayerType::LogSoftmax:
432  {
433  auto cLayer = boost::polymorphic_downcast<const LogSoftmaxLayer*>(&layer);
434 
435  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
436  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
437 
438  result = layerSupportObject->IsLogSoftmaxSupported(OverrideDataType(input, dataType),
439  OverrideDataType(output, dataType),
440  cLayer->GetParameters(),
441  reason);
442  break;
443  }
444  case LayerType::Lstm:
445  {
446  auto cLayer = boost::polymorphic_downcast<const LstmLayer*>(&layer);
447  const LstmDescriptor& descriptor = cLayer->GetParameters();
448 
449  // All inputs.
450  const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
451  dataType);
452  const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
453  dataType);
454  const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
455  dataType);
456  // All outputs
457  const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
458  const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
459  const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
460  const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);
461 
462  // Basic parameters
463  const TensorInfo& inputToForgetWeights
464  = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
465  const TensorInfo& inputToCellWeights
466  = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
467  const TensorInfo& inputToOutputWeights
468  = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
469  const TensorInfo& recurrentToForgetWeights
470  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
471  const TensorInfo& recurrentToCellWeights
472  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
473  const TensorInfo& recurrentToOutputWeights
474  = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
475  const TensorInfo& forgetGateBias
476  = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
477  const TensorInfo& cellBias
478  = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
479  const TensorInfo& outputGateBias
480  = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
481 
482  LstmInputParamsInfo paramsInfo;
483 
484  paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
485  paramsInfo.m_InputToCellWeights = &inputToCellWeights;
486  paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
487  paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
488  paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
489  paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
490  paramsInfo.m_ForgetGateBias = &forgetGateBias;
491  paramsInfo.m_CellBias = &cellBias;
492  paramsInfo.m_OutputGateBias = &outputGateBias;
493 
494 
495  // Optional parameters
496  TensorInfo optInputToInputWeights;
497  TensorInfo optRecurrentToInputWeights;
498  TensorInfo optCellToInputWeights;
499  TensorInfo optInputGateBias;
500  TensorInfo optProjectionWeights;
501  TensorInfo optProjectionBias;
502  TensorInfo optCellToForgetWeights;
503  TensorInfo optCellToOutputWeights;
504  TensorInfo optInputLayerNormWeights;
505  TensorInfo optForgetLayerNormWeights;
506  TensorInfo optCellLayerNormWeights;
507  TensorInfo optOutputLayerNormWeights;
508 
509  if(!descriptor.m_CifgEnabled)
510  {
511  optInputToInputWeights =
512  OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
513  paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
514 
515  optRecurrentToInputWeights =
516  OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
517  paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
518  if (cLayer->m_CifgParameters.m_CellToInputWeights != nullptr)
519  {
520  optCellToInputWeights =
521  OverrideDataType(cLayer->m_CifgParameters.m_CellToInputWeights->GetTensorInfo(), dataType);
522  paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
523  }
524  optInputGateBias =
525  OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
526  paramsInfo.m_InputGateBias = &optInputGateBias;
527  }
528 
529  if(descriptor.m_ProjectionEnabled)
530  {
531  optProjectionWeights =
532  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
533  paramsInfo.m_ProjectionWeights = &optProjectionWeights;
534  if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
535  {
536  optProjectionBias =
537  OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
538  paramsInfo.m_ProjectionBias = &optProjectionBias;
539  }
540  }
541 
542  if(descriptor.m_PeepholeEnabled)
543  {
544  optCellToForgetWeights =
545  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
546  paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
547  optCellToOutputWeights =
548  OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
549  paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
550  }
551 
552  if(descriptor.m_LayerNormEnabled)
553  {
554  if (!descriptor.m_CifgEnabled)
555  {
556  optInputLayerNormWeights = OverrideDataType(
557  cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
558  paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
559  }
560 
561  optForgetLayerNormWeights = OverrideDataType(
562  cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
563  paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
564 
565  optCellLayerNormWeights = OverrideDataType(
566  cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
567  paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
568 
569  optOutputLayerNormWeights = OverrideDataType(
570  cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
571  paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
572  }
573 
574  result = layerSupportObject->IsLstmSupported(
575  input,
576  outputStateIn,
577  cellStateIn,
578  scratchBuffer,
579  outputStateOut,
580  cellStateOut,
581  output,
582  descriptor,
583  paramsInfo,
584  reason);
585  break;
586  }
587  case LayerType::Maximum:
588  {
589  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
590  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
591  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
592 
593  result = layerSupportObject->IsMaximumSupported(OverrideDataType(input0, dataType),
594  OverrideDataType(input1, dataType),
595  OverrideDataType(output, dataType),
596  reason);
597  break;
598  }
599  case LayerType::MemCopy:
600  {
601  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
602  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
603 
604  result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
605  OverrideDataType(output, dataType),
606  reason);
607  break;
608  }
609  case LayerType::MemImport:
610  {
611  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
612  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
613 
614  result = layerSupportObject->IsMemImportSupported(OverrideDataType(input, dataType),
615  OverrideDataType(output, dataType),
616  reason);
617  break;
618  }
619  case LayerType::Merge:
620  {
621  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
622  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
623  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
624 
625  result = layerSupportObject->IsMergeSupported(OverrideDataType(input0, dataType),
626  OverrideDataType(input1, dataType),
627  OverrideDataType(output, dataType),
628  reason);
629  break;
630  }
631  case LayerType::Concat:
632  {
633  auto cLayer = boost::polymorphic_downcast<const ConcatLayer*>(&layer);
634 
635  // Get vector of all inputs.
636  auto getTensorInfo = [&dataType](const InputSlot& slot)
637  {
638  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
639  };
640  auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
641  auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
642  std::vector<TensorInfo> inputs(beginI, endI);
643 
644  auto getTensorInfoPtr = [](const TensorInfo& info)
645  {
646  return &info;
647  };
648  auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
649  auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
650  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
651 
652  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
653 
654  result = layerSupportObject->IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
655 
656 
657  break;
658  }
659  case LayerType::Multiplication:
660  {
661  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
662  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
663  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
664  result = layerSupportObject->IsMultiplicationSupported(
665  OverrideDataType(input0, dataType),
666  OverrideDataType(input1, dataType),
667  OverrideDataType(output, dataType),
668  reason);
669  break;
670  }
671  case LayerType::Normalization:
672  {
673  auto cLayer = boost::polymorphic_downcast<const NormalizationLayer*>(&layer);
674  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
675  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
676  result = layerSupportObject->IsNormalizationSupported(OverrideDataType(input, dataType),
677  OverrideDataType(output, dataType),
678  cLayer->GetParameters(),
679  reason);
680  break;
681  }
682  case LayerType::Output:
683  {
684  const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
685  result = layerSupportObject->IsOutputSupported(OverrideDataType(output, dataType), reason);
686  break;
687  }
688  case LayerType::Permute:
689  {
690  auto cLayer = boost::polymorphic_downcast<const PermuteLayer*>(&layer);
691  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
692  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
693  result = layerSupportObject->IsPermuteSupported(OverrideDataType(input, dataType),
694  OverrideDataType(output, dataType),
695  cLayer->GetParameters(),
696  reason);
697  break;
698  }
699  case LayerType::Pad:
700  {
701  auto cLayer = boost::polymorphic_downcast<const PadLayer*>(&layer);
702  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
703  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
704  result = layerSupportObject->IsPadSupported(
705  OverrideDataType(input, dataType),
706  OverrideDataType(output, dataType),
707  cLayer->GetParameters(),
708  reason);
709  break;
710  }
711  case LayerType::Pooling2d:
712  {
713  auto cLayer = boost::polymorphic_downcast<const Pooling2dLayer*>(&layer);
714  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
715  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
716  result = layerSupportObject->IsPooling2dSupported(OverrideDataType(input, dataType),
717  OverrideDataType(output, dataType),
718  cLayer->GetParameters(),
719  reason);
720  break;
721  }
722  case LayerType::PreCompiled:
723  {
724  auto cLayer = boost::polymorphic_downcast<const PreCompiledLayer*>(&layer);
725  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
726  result = layerSupportObject->IsPreCompiledSupported(OverrideDataType(input, dataType),
727  cLayer->GetParameters(),
728  reason);
729  break;
730  }
731  case LayerType::Quantize:
732  {
733  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
734  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
735  result = layerSupportObject->IsQuantizeSupported(input, output, reason);
736  break;
737  }
738  case LayerType::QuantizedLstm:
739  {
740  auto cLayer = boost::polymorphic_downcast<const QuantizedLstmLayer*>(&layer);
741 
742  // Inputs
743  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
744  const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
745  const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
746 
747  // Outputs
748  const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
749  const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
750 
751  // QuantizedLstm parameters
752  QuantizedLstmInputParamsInfo paramsInfo;
753 
754  paramsInfo.m_InputToInputWeights =
755  &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
756  paramsInfo.m_InputToForgetWeights =
757  &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
758  paramsInfo.m_InputToCellWeights =
759  &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
760  paramsInfo.m_InputToOutputWeights =
761  &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
762 
763  paramsInfo.m_RecurrentToInputWeights =
764  &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
765  paramsInfo.m_RecurrentToForgetWeights =
766  &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
767  paramsInfo.m_RecurrentToCellWeights =
768  &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
769  paramsInfo.m_RecurrentToOutputWeights =
770  &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
771 
772  paramsInfo.m_InputGateBias =
773  &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
774  paramsInfo.m_ForgetGateBias =
775  &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
776  paramsInfo.m_CellBias =
777  &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
778  paramsInfo.m_OutputGateBias =
779  &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();;
780 
781  result = layerSupportObject->IsQuantizedLstmSupported(input,
782  previousCellStateIn,
783  previousOutputIn,
784  cellStateOut,
785  output,
786  paramsInfo,
787  reason);
788  break;
789  }
790  case LayerType::Division:
791  {
792  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
793  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
794  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
795  result = layerSupportObject->IsDivisionSupported(
796  OverrideDataType(input0, dataType),
797  OverrideDataType(input1, dataType),
798  OverrideDataType(output, dataType),
799  reason);
800  break;
801  }
802  case LayerType::Reshape:
803  {
804  auto cLayer = boost::polymorphic_downcast<const ReshapeLayer*>(&layer);
805  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
806  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
807  result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType),
808  OverrideDataType(output, dataType),
809  cLayer->GetParameters(),
810  reason);
811  break;
812  }
813  case LayerType::Resize:
814  {
815  auto cLayer = boost::polymorphic_downcast<const ResizeLayer*>(&layer);
816  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
817  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
818  result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType),
819  OverrideDataType(output, dataType),
820  cLayer->GetParameters(),
821  reason);
822  break;
823  }
824  case LayerType::Slice:
825  {
826  auto cLayer = boost::polymorphic_downcast<const SliceLayer*>(&layer);
827 
828  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
829  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
830 
831  result = layerSupportObject->IsSliceSupported(OverrideDataType(input, dataType),
832  OverrideDataType(output, dataType),
833  cLayer->GetParameters(),
834  reason);
835  break;
836  }
837  case LayerType::Softmax:
838  {
839  auto cLayer = boost::polymorphic_downcast<const SoftmaxLayer*>(&layer);
840  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
841  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
842  result = layerSupportObject->IsSoftmaxSupported(OverrideDataType(input, dataType),
843  OverrideDataType(output, dataType),
844  cLayer->GetParameters(),
845  reason);
846  break;
847  }
848  case LayerType::SpaceToBatchNd:
849  {
850  auto cLayer = boost::polymorphic_downcast<const SpaceToBatchNdLayer*>(&layer);
851  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
852  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
853  result = layerSupportObject->IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
854  OverrideDataType(output, dataType),
855  cLayer->GetParameters(),
856  reason);
857  break;
858  }
859  case LayerType::SpaceToDepth:
860  {
861  auto cLayer = boost::polymorphic_downcast<const SpaceToDepthLayer*>(&layer);
862 
863  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
864  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
865 
866  result = layerSupportObject->IsSpaceToDepthSupported(OverrideDataType(input, dataType),
867  OverrideDataType(output, dataType),
868  cLayer->GetParameters(),
869  reason);
870  break;
871  }
872  case LayerType::Splitter:
873  {
874  auto cLayer = boost::polymorphic_downcast<const SplitterLayer*>(&layer);
875  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
876 
877  // Get vector of all outputs.
878  auto getTensorInfo = [&dataType](const OutputSlot& slot)
879  {
880  return OverrideDataType(slot.GetTensorInfo(), dataType);
881  };
882  auto beginI = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfo);
883  auto endI = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfo);
884  std::vector<TensorInfo> outputs(beginI, endI);
885 
886  const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
887 
888  result = layerSupportObject->IsSplitterSupported(OverrideDataType(input, dataType),
889  outputPtrs,
890  cLayer->GetParameters(),
891  reason);
892  break;
893  }
894  case LayerType::Stack:
895  {
896  auto cLayer = boost::polymorphic_downcast<const StackLayer*>(&layer);
897 
898  // Get vector of all inputs.
899  auto getTensorInfo = [&dataType](const InputSlot& slot)
900  {
901  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
902  };
903  auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
904  auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
905  std::vector<TensorInfo> inputs(beginI, endI);
906 
907  auto getTensorInfoPtr = [](const TensorInfo& info)
908  {
909  return &info;
910  };
911  auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
912  auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
913  std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
914 
915  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
916 
917  result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
918 
919  break;
920  }
921  case LayerType::StandIn:
922  {
923  auto cLayer = boost::polymorphic_downcast<const StandInLayer*>(&layer);
924 
925  // Get vector of all inputs.
926  auto getTensorInfoIn = [&dataType](const InputSlot& slot)
927  {
928  return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
929  };
930  auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
931  {
932  return OverrideDataType(slot.GetTensorInfo(), dataType);
933  };
934  auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfoIn);
935  auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfoIn);
936  std::vector<TensorInfo> inputs(beginI, endI);
937 
938  auto beginO = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
939  auto endO = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfoOut);
940  std::vector<TensorInfo> outputs(beginO, endO);
941 
942 
943  auto getTensorInfoPtr = [](const TensorInfo& info)
944  {
945  return &info;
946  };
947  auto beginPtrI = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
948  auto endPtrI = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
949  std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
950 
951  auto beginPtrO = boost::make_transform_iterator(outputs.begin(), getTensorInfoPtr);
952  auto endPtrO = boost::make_transform_iterator(outputs.end(), getTensorInfoPtr);
953  std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
954 
955 
956  result = layerSupportObject->IsStandInSupported(inputPtrs,
957  outputPtrs,
958  cLayer->GetParameters(),
959  reason);
960  break;
961  }
962  case LayerType::StridedSlice:
963  {
964  auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
965  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
966  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
967  result = layerSupportObject->IsStridedSliceSupported(OverrideDataType(input, dataType),
968  OverrideDataType(output, dataType),
969  cLayer->GetParameters(),
970  reason);
971  break;
972  }
973  case LayerType::Subtraction:
974  {
975  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
976  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
977  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
978  result = layerSupportObject->IsSubtractionSupported(
979  OverrideDataType(input0, dataType),
980  OverrideDataType(input1, dataType),
981  OverrideDataType(output, dataType),
982  reason);
983  break;
984  }
985  case LayerType::Switch:
986  {
987  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
988  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
989  const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
990  const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
991  result = layerSupportObject->IsSwitchSupported(OverrideDataType(input0, dataType),
992  OverrideDataType(input1, dataType),
993  OverrideDataType(output0, dataType),
994  OverrideDataType(output1, dataType),
995  reason);
996  break;
997  }
998  case LayerType::Mean:
999  {
1000  auto cLayer = boost::polymorphic_downcast<const MeanLayer*>(&layer);
1001  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1002  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1003  result = layerSupportObject->IsMeanSupported(
1004  OverrideDataType(input, dataType),
1005  OverrideDataType(output, dataType),
1006  cLayer->GetParameters(),
1007  reason);
1008  break;
1009  }
1010  case LayerType::Minimum:
1011  {
1012  const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1013  const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1014  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1015  result = layerSupportObject->IsMinimumSupported(OverrideDataType(input0, dataType),
1016  OverrideDataType(input1, dataType),
1017  OverrideDataType(output, dataType),
1018  reason);
1019  break;
1020  }
1021  case LayerType::Prelu:
1022  {
1023  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1024  const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1025  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1026  result = layerSupportObject->IsPreluSupported(OverrideDataType(input, dataType),
1027  OverrideDataType(alpha, dataType),
1028  OverrideDataType(output, dataType),
1029  reason);
1030  break;
1031  }
1032  case LayerType::Transpose:
1033  {
1034  auto cLayer = boost::polymorphic_downcast<const TransposeLayer*>(&layer);
1035  const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1036  const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1037  result = layerSupportObject->IsTransposeSupported(OverrideDataType(input, dataType),
1038  OverrideDataType(output, dataType),
1039  cLayer->GetParameters(),
1040  reason);
1041  break;
1042  }
1043  case LayerType::TransposeConvolution2d:
1044  {
1045  auto cLayer = boost::polymorphic_downcast<const TransposeConvolution2dLayer*>(&layer);
1046 
1047  const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1048  dataType);
1049  const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1050 
1051  const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();
1052 
1053  Optional<TensorInfo> biases;
1054  if (descriptor.m_BiasEnabled)
1055  {
1056  BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
1057  biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
1058  GetBiasTypeFromWeightsType(dataType));
1059  }
1060 
1061  BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
1062  const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
1063 
1064  result = layerSupportObject->IsTransposeConvolution2dSupported(input,
1065  output,
1066  descriptor,
1067  weights,
1068  biases,
1069  reason);
1070 
1071  break;
1072  }
1073  default:
1074  {
1075  BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
1076  reason.value() = "Unrecognised layer type";
1077  result = false;
1078  break;
1079  }
1080  }
1081  return result;
1082 }
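The switch above simply routes each LayerType to the matching ILayerSupport query for the backend under consideration. The following is a minimal usage sketch, not taken from this file: it assumes a registered backend id such as "CpuRef" (the reference backend), an IConnectableLayer obtained from an already-built INetwork, and Float32 tensors; all of these choices are illustrative.

#include <WorkloadFactory.hpp>   // include path as listed at the top of this page; may differ per build setup
#include <string>
#include <iostream>

// Sketch: report whether `backend` can execute `layer` when tensors are Float32.
bool LayerRunsOn(const armnn::BackendId& backend, const armnn::IConnectableLayer& layer)
{
    std::string reason;
    const bool supported = armnn::IWorkloadFactory::IsLayerSupported(
        backend, layer, armnn::DataType::Float32, reason);
    if (!supported)
    {
        std::cerr << "Layer not supported: " << reason << std::endl;
    }
    return supported;
}

// Example call (both arguments are assumptions for illustration):
//     bool ok = LayerRunsOn(armnn::BackendId("CpuRef"), *someLayer);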

◆ IsLayerSupported() [2/2]

bool IsLayerSupported (const IConnectableLayer & layer,
                       Optional< DataType > dataType,
                       std::string & outReasonIfUnsupported
                      )
static

Definition at line 1084 of file WorkloadFactory.cpp.


References IWorkloadFactory::IsLayerSupported().

1084 bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
1085                                         Optional<DataType> dataType,
1086                                         std::string& outReasonIfUnsupported)
1087 {
1088  auto layer = boost::polymorphic_downcast<const Layer*>(&connectableLayer);
1089  return IsLayerSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1090 }
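This overload forwards to the BackendId overload above using the backend already assigned to the layer. A short sketch under the same assumptions as the previous example; `someLayer` is an illustrative IConnectableLayer pointer, and the layer must have had a backend assigned (for example by the optimizer) before the call:

std::string reason;
// Uses the backend recorded on the layer itself; per lines 1088-1089 this is
// equivalent to calling the BackendId overload with layer->GetBackendId().
const bool ok = armnn::IWorkloadFactory::IsLayerSupported(*someLayer, armnn::DataType::Float32, reason);
if (!ok)
{
    // `reason` now describes why the assigned backend rejected the layer.
}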

◆ SupportsSubTensors()

virtual bool SupportsSubTensors ( ) const
pure virtual
The documentation for this class was generated from the following files:
WorkloadFactory.hpp
WorkloadFactory.cpp
-- cgit v1.2.1