author    Teresa Charlin <teresa.charlinreyes@arm.com>   2022-01-07 09:47:29 +0000
committer TeresaARM <teresa.charlinreyes@arm.com>        2022-01-18 13:15:41 +0000
commit    611c7fb97412230d5cefee047081455fb60db06c (patch)
tree      1244050b50a300f67285aef00d8b05c52865b89a
parent    53e06599a3af44db90c37d1cda34fc85ec9c27fa (diff)
download  armnn-611c7fb97412230d5cefee047081455fb60db06c.tar.gz
IVGCVSW-6641 Stabilize the IWorkloadFactory interface with unified strategy
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ia941be9bf2c15fe56e49a9b9a2bbe943a8152438
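In short: every layer's CreateWorkload override now funnels through a single, ABI-stable factory method keyed by LayerType, while the per-layer virtuals (CreateActivation, CreateAddition, ...) remain only as deprecated shims until 22.08. The calling pattern, taken from the ActivationLayer hunk further down (includes and class definition elided), looks like this:

    std::unique_ptr<IWorkload> ActivationLayer::CreateWorkload(const IWorkloadFactory& factory) const
    {
        ActivationQueueDescriptor descriptor;
        SetAdditionalInfo(descriptor);

        // Previously: factory.CreateActivation(descriptor, PrepInfoAndDesc(descriptor));
        return factory.CreateWorkload(LayerType::Activation, descriptor, PrepInfoAndDesc(descriptor));
    }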
-rw-r--r--  include/armnn/backends/WorkloadData.hpp | 6
-rw-r--r--  include/armnn/backends/WorkloadFactory.hpp | 142
-rw-r--r--  src/armnn/layers/AbsLayer.cpp | 5
-rw-r--r--  src/armnn/layers/ActivationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/AdditionLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ArgMinMaxLayer.cpp | 2
-rw-r--r--  src/armnn/layers/BatchNormalizationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/BatchToSpaceNdLayer.cpp | 2
-rw-r--r--  src/armnn/layers/CastLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ChannelShuffleLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ComparisonLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ConcatLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ConstantLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ConvertBf16ToFp32Layer.cpp | 2
-rw-r--r--  src/armnn/layers/ConvertFp16ToFp32Layer.cpp | 2
-rw-r--r--  src/armnn/layers/ConvertFp32ToBf16Layer.cpp | 2
-rw-r--r--  src/armnn/layers/ConvertFp32ToFp16Layer.cpp | 2
-rw-r--r--  src/armnn/layers/Convolution2dLayer.cpp | 2
-rw-r--r--  src/armnn/layers/Convolution3dLayer.cpp | 2
-rw-r--r--  src/armnn/layers/DebugLayer.cpp | 2
-rw-r--r--  src/armnn/layers/DepthToSpaceLayer.cpp | 2
-rw-r--r--  src/armnn/layers/DepthwiseConvolution2dLayer.cpp | 2
-rw-r--r--  src/armnn/layers/DequantizeLayer.cpp | 2
-rw-r--r--  src/armnn/layers/DetectionPostProcessLayer.cpp | 2
-rw-r--r--  src/armnn/layers/DivisionLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ElementwiseUnaryLayer.cpp | 8
-rw-r--r--  src/armnn/layers/FakeQuantizationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/FillLayer.cpp | 2
-rw-r--r--  src/armnn/layers/FloorLayer.cpp | 2
-rw-r--r--  src/armnn/layers/FullyConnectedLayer.cpp | 2
-rw-r--r--  src/armnn/layers/GatherLayer.cpp | 2
-rw-r--r--  src/armnn/layers/InstanceNormalizationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/L2NormalizationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/LogSoftmaxLayer.cpp | 2
-rw-r--r--  src/armnn/layers/LogicalBinaryLayer.cpp | 2
-rw-r--r--  src/armnn/layers/LstmLayer.cpp | 2
-rw-r--r--  src/armnn/layers/MaximumLayer.cpp | 2
-rw-r--r--  src/armnn/layers/MeanLayer.cpp | 2
-rw-r--r--  src/armnn/layers/MinimumLayer.cpp | 2
-rw-r--r--  src/armnn/layers/MultiplicationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/NormalizationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/PadLayer.cpp | 2
-rw-r--r--  src/armnn/layers/PermuteLayer.cpp | 2
-rw-r--r--  src/armnn/layers/Pooling2dLayer.cpp | 2
-rw-r--r--  src/armnn/layers/Pooling3dLayer.cpp | 2
-rw-r--r--  src/armnn/layers/PreCompiledLayer.cpp | 2
-rw-r--r--  src/armnn/layers/PreluLayer.cpp | 2
-rw-r--r--  src/armnn/layers/QLstmLayer.cpp | 2
-rw-r--r--  src/armnn/layers/QuantizeLayer.cpp | 2
-rw-r--r--  src/armnn/layers/QuantizedLstmLayer.cpp | 2
-rw-r--r--  src/armnn/layers/RankLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ReduceLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ReshapeLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ResizeLayer.cpp | 2
-rw-r--r--  src/armnn/layers/RsqrtLayer.cpp | 5
-rw-r--r--  src/armnn/layers/ShapeLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SliceLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SoftmaxLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SpaceToBatchNdLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SpaceToDepthLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SplitterLayer.cpp | 2
-rw-r--r--  src/armnn/layers/StackLayer.cpp | 2
-rw-r--r--  src/armnn/layers/StridedSliceLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SubtractionLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SwitchLayer.cpp | 2
-rw-r--r--  src/armnn/layers/TransposeConvolution2dLayer.cpp | 2
-rw-r--r--  src/armnn/layers/TransposeLayer.cpp | 2
-rw-r--r--  src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp | 2
-rw-r--r--  src/backends/aclCommon/test/MemCopyTestImpl.hpp | 2
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp | 360
-rw-r--r--  src/backends/backendsCommon/WorkloadFactoryBase.hpp | 16
-rw-r--r--  src/backends/backendsCommon/test/DynamicBackendTests.hpp | 4
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp | 2
-rw-r--r--  src/backends/backendsCommon/test/WorkloadDataValidation.cpp | 3
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp | 21
-rw-r--r--  src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp | 21
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp | 3
-rw-r--r--  src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp | 12
-rw-r--r--  src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp | 3
-rw-r--r--  src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp | 3
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp | 3
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp | 17
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp | 44
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp | 3
-rw-r--r--  src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp | 8
-rw-r--r--  src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp | 18
-rw-r--r--  src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp | 12
-rw-r--r--  src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp | 8
-rw-r--r--  src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp | 18
-rw-r--r--  src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp | 16
-rw-r--r--  src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp | 10
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.cpp | 10
-rw-r--r--  src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ShapeTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp | 8
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp | 12
-rw-r--r--  src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp | 6
-rw-r--r--  src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp | 30
-rw-r--r--  src/backends/cl/ClWorkloadFactory.cpp | 445
-rw-r--r--  src/backends/cl/ClWorkloadFactory.hpp | 126
-rw-r--r--  src/backends/cl/test/OpenClTimerTest.cpp | 2
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp | 429
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.hpp | 132
-rw-r--r--  src/backends/neon/test/NeonTimerTest.cpp | 3
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp | 507
-rw-r--r--  src/backends/reference/RefWorkloadFactory.hpp | 138
144 files changed, 2653 insertions, 226 deletions
diff --git a/include/armnn/backends/WorkloadData.hpp b/include/armnn/backends/WorkloadData.hpp
index 7406547216..21141583c6 100644
--- a/include/armnn/backends/WorkloadData.hpp
+++ b/include/armnn/backends/WorkloadData.hpp
@@ -27,6 +27,8 @@ struct QueueDescriptor
std::vector<ITensorHandle*> m_Outputs;
void* m_AdditionalInfoObject;
+ virtual ~QueueDescriptor() = default;
+
void ValidateInputsOutputs(const std::string& descName,
unsigned int numExpectedIn,
unsigned int numExpectedOut) const;
@@ -38,7 +40,6 @@ struct QueueDescriptor
}
protected:
- ~QueueDescriptor() = default;
QueueDescriptor()
: m_AdditionalInfoObject(nullptr)
{}
@@ -52,8 +53,9 @@ struct QueueDescriptorWithParameters : public QueueDescriptor
{
LayerDescriptor m_Parameters;
+ virtual ~QueueDescriptorWithParameters() = default;
+
protected:
- ~QueueDescriptorWithParameters() = default;
QueueDescriptorWithParameters() = default;
QueueDescriptorWithParameters(QueueDescriptorWithParameters const&) = default;
QueueDescriptorWithParameters& operator=(QueueDescriptorWithParameters const&) = default;
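The hunk above promotes ~QueueDescriptor and ~QueueDescriptorWithParameters from protected non-virtual to public virtual. A plausible reading (not stated in the commit message) is that descriptors now travel through the unified CreateWorkload path as QueueDescriptor references, so the base type needs to be an ordinary polymorphic type: checked downcasts work on it, and owning or deleting a concrete descriptor through the base is well-defined. A minimal sketch, assuming the Arm NN descriptor headers are included:

    #include <memory>

    // Deleting a concrete descriptor through the base pointer is now
    // well-defined because the base destructor is public and virtual.
    void OwnDescriptorPolymorphically()
    {
        std::unique_ptr<QueueDescriptor> descriptor = std::make_unique<ActivationQueueDescriptor>();
        descriptor.reset();   // runs ~ActivationQueueDescriptor, then ~QueueDescriptor
    }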
diff --git a/include/armnn/backends/WorkloadFactory.hpp b/include/armnn/backends/WorkloadFactory.hpp
index 68ad2e3741..17e56f1b6c 100644
--- a/include/armnn/backends/WorkloadFactory.hpp
+++ b/include/armnn/backends/WorkloadFactory.hpp
@@ -68,212 +68,354 @@ public:
DataLayout dataLayout,
const bool IsMemoryManaged = true) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateWorkload(LayerType type,
+ const QueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
+
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateCast(const CastQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(
const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateDetectionPostProcess(
const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateFill(const FillQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateInstanceNormalization(
const InstanceNormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateMerge(const MergeQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreatePooling3d(const Pooling3dQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateQLstm(const QLstmQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateRank(const RankQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateReduce(const ReduceQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateShape(const ShapeQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateSwitch(const SwitchQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateTranspose(const TransposeQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateTransposeConvolution2d(
const TransposeConvolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
virtual std::unique_ptr<IWorkload> CreateUnidirectionalSequenceLstm(
const UnidirectionalSequenceLstmQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
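On the backend side, each workload factory overrides the new virtual once and dispatches on LayerType. A minimal sketch of that shape follows; the real implementations are in ClWorkloadFactory.cpp, NeonWorkloadFactory.cpp and RefWorkloadFactory.cpp listed above and will differ. MyWorkloadFactory and the MyXxxWorkload classes here are placeholders, includes and the class definition are elided, and the downcast uses Arm NN's PolymorphicDowncast helper (a plain static_cast would illustrate the same idea):

    std::unique_ptr<IWorkload> MyWorkloadFactory::CreateWorkload(LayerType type,
                                                                 const QueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
    {
        switch (type)
        {
            case LayerType::Activation:
            {
                auto* activationDesc = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
                return std::make_unique<MyActivationWorkload>(*activationDesc, info);
            }
            case LayerType::Addition:
            {
                auto* additionDesc = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
                return std::make_unique<MyAdditionWorkload>(*additionDesc, info);
            }
            // ... one case per LayerType supported by this backend ...
            default:
                return nullptr;   // layer type not supported by this backend
        }
    }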
diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp
index bc9e4f6d2a..13fa24aacf 100644
--- a/src/armnn/layers/AbsLayer.cpp
+++ b/src/armnn/layers/AbsLayer.cpp
@@ -21,10 +21,11 @@ AbsLayer::AbsLayer(const char* name)
std::unique_ptr<IWorkload> AbsLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- AbsQueueDescriptor descriptor;
+ ElementwiseUnaryQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_Operation = UnaryOperation::Abs;
SetAdditionalInfo(descriptor);
- return factory.CreateAbs(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::ElementwiseUnary, descriptor, PrepInfoAndDesc(descriptor));
}
AbsLayer* AbsLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp
index 2cb1607204..63c98a93f6 100644
--- a/src/armnn/layers/ActivationLayer.cpp
+++ b/src/armnn/layers/ActivationLayer.cpp
@@ -22,7 +22,7 @@ std::unique_ptr<IWorkload> ActivationLayer::CreateWorkload(const IWorkloadFactor
ActivationQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateActivation(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Activation, descriptor, PrepInfoAndDesc(descriptor));
}
ActivationLayer* ActivationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/AdditionLayer.cpp b/src/armnn/layers/AdditionLayer.cpp
index c3e98a13ca..f55bb55edd 100644
--- a/src/armnn/layers/AdditionLayer.cpp
+++ b/src/armnn/layers/AdditionLayer.cpp
@@ -24,7 +24,7 @@ std::unique_ptr<IWorkload> AdditionLayer::CreateWorkload(const IWorkloadFactory&
AdditionQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateAddition(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Addition, descriptor, PrepInfoAndDesc(descriptor));
}
AdditionLayer* AdditionLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index dc69ef6a41..30db7ba803 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -26,7 +26,7 @@ std::unique_ptr<IWorkload> ArgMinMaxLayer::CreateWorkload(const IWorkloadFactory
ArgMinMaxQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateArgMinMax(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::ArgMinMax, descriptor, PrepInfoAndDesc(descriptor));
}
ArgMinMaxLayer* ArgMinMaxLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index e830b186b8..18d167f8cb 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -34,7 +34,7 @@ std::unique_ptr<IWorkload> BatchNormalizationLayer::CreateWorkload(const IWorklo
descriptor.m_Beta = m_Beta.get();
descriptor.m_Gamma = m_Gamma.get();
- return factory.CreateBatchNormalization(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::BatchNormalization, descriptor, PrepInfoAndDesc(descriptor));
}
BatchNormalizationLayer* BatchNormalizationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index ce602ad08f..485500d87d 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -33,7 +33,7 @@ std::unique_ptr<IWorkload> BatchToSpaceNdLayer::CreateWorkload(const IWorkloadFa
BatchToSpaceNdQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateBatchToSpaceNd(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::BatchToSpaceNd, descriptor, PrepInfoAndDesc(descriptor));
}
BatchToSpaceNdLayer* BatchToSpaceNdLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/CastLayer.cpp b/src/armnn/layers/CastLayer.cpp
index c5cecb4f74..03b68659d1 100644
--- a/src/armnn/layers/CastLayer.cpp
+++ b/src/armnn/layers/CastLayer.cpp
@@ -23,7 +23,7 @@ std::unique_ptr<IWorkload> CastLayer::CreateWorkload(const IWorkloadFactory& fac
CastQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateCast(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Cast, descriptor, PrepInfoAndDesc(descriptor));
}
CastLayer* CastLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ChannelShuffleLayer.cpp b/src/armnn/layers/ChannelShuffleLayer.cpp
index 5f4729ff51..78a2393a52 100644
--- a/src/armnn/layers/ChannelShuffleLayer.cpp
+++ b/src/armnn/layers/ChannelShuffleLayer.cpp
@@ -24,7 +24,7 @@ std::unique_ptr<IWorkload> ChannelShuffleLayer::CreateWorkload(const IWorkloadFa
ChannelShuffleQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateChannelShuffle(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::ChannelShuffle, descriptor, PrepInfoAndDesc(descriptor));
}
ChannelShuffleLayer* ChannelShuffleLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp
index 47430f1766..cf16386f6e 100644
--- a/src/armnn/layers/ComparisonLayer.cpp
+++ b/src/armnn/layers/ComparisonLayer.cpp
@@ -25,7 +25,7 @@ std::unique_ptr<IWorkload> ComparisonLayer::CreateWorkload(const IWorkloadFactor
ComparisonQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateComparison(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Comparison,descriptor, PrepInfoAndDesc(descriptor));
}
ComparisonLayer* ComparisonLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 98cb585322..b59e0b9a57 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -33,7 +33,7 @@ std::unique_ptr<IWorkload> ConcatLayer::CreateWorkload(const IWorkloadFactory& f
}
SetAdditionalInfo(descriptor);
- return factory.CreateConcat(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Concat, descriptor, PrepInfoAndDesc(descriptor));
}
template<typename FactoryType>
diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp
index 0ab92cc2fb..0c06dd5a54 100644
--- a/src/armnn/layers/ConstantLayer.cpp
+++ b/src/armnn/layers/ConstantLayer.cpp
@@ -24,7 +24,7 @@ std::unique_ptr<IWorkload> ConstantLayer::CreateWorkload(const IWorkloadFactory&
descriptor.m_LayerOutput = m_LayerOutput.get();
SetAdditionalInfo(descriptor);
- return factory.CreateConstant(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Constant, descriptor, PrepInfoAndDesc(descriptor));
}
ConstantLayer* ConstantLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
index e589008bd4..6d843f3129 100644
--- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
@@ -24,7 +24,7 @@ std::unique_ptr<IWorkload> ConvertBf16ToFp32Layer::CreateWorkload(const IWorkloa
ConvertBf16ToFp32QueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateConvertBf16ToFp32(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::ConvertBf16ToFp32, descriptor, PrepInfoAndDesc(descriptor));
}
ConvertBf16ToFp32Layer* ConvertBf16ToFp32Layer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index 656f59f192..cc3c8b18e1 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -24,7 +24,7 @@ std::unique_ptr<IWorkload> ConvertFp16ToFp32Layer::CreateWorkload(const IWorkloa
ConvertFp16ToFp32QueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateConvertFp16ToFp32(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::ConvertFp16ToFp32, descriptor, PrepInfoAndDesc(descriptor));
}
ConvertFp16ToFp32Layer* ConvertFp16ToFp32Layer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
index b2f4eb11d0..978fbd16da 100644
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
@@ -24,7 +24,7 @@ std::unique_ptr<IWorkload> ConvertFp32ToBf16Layer::CreateWorkload(const IWorkloa
ConvertFp32ToBf16QueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateConvertFp32ToBf16(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::ConvertFp32ToBf16, descriptor, PrepInfoAndDesc(descriptor));
}
ConvertFp32ToBf16Layer* ConvertFp32ToBf16Layer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index f5a8b01a23..2e1074a1b4 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -23,7 +23,7 @@ std::unique_ptr<IWorkload> ConvertFp32ToFp16Layer::CreateWorkload(const IWorkloa
ConvertFp32ToFp16QueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateConvertFp32ToFp16(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::ConvertFp32ToFp16, descriptor, PrepInfoAndDesc(descriptor));
}
ConvertFp32ToFp16Layer* ConvertFp32ToFp16Layer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index 1c60f17b96..68e1cb5339 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -63,7 +63,7 @@ std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const IWorkloadFac
SetAdditionalInfo(descriptor);
- return factory.CreateConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Convolution2d, descriptor, PrepInfoAndDesc(descriptor));
}
Convolution2dLayer* Convolution2dLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/Convolution3dLayer.cpp b/src/armnn/layers/Convolution3dLayer.cpp
index cc33eae613..42b275e055 100644
--- a/src/armnn/layers/Convolution3dLayer.cpp
+++ b/src/armnn/layers/Convolution3dLayer.cpp
@@ -50,7 +50,7 @@ std::unique_ptr<IWorkload> Convolution3dLayer::CreateWorkload(const IWorkloadFac
Convolution3dQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateConvolution3d(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Convolution3d, descriptor, PrepInfoAndDesc(descriptor));
}
Convolution3dLayer* Convolution3dLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index 96538feb93..90a55cbc40 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -28,7 +28,7 @@ std::unique_ptr<IWorkload> DebugLayer::CreateWorkload(const IWorkloadFactory& fa
SetAdditionalInfo(descriptor);
- return factory.CreateDebug(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Debug, descriptor, PrepInfoAndDesc(descriptor));
}
DebugLayer* DebugLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp
index 05d691e0d6..033154e81d 100644
--- a/src/armnn/layers/DepthToSpaceLayer.cpp
+++ b/src/armnn/layers/DepthToSpaceLayer.cpp
@@ -30,7 +30,7 @@ std::unique_ptr<IWorkload> DepthToSpaceLayer::CreateWorkload(const IWorkloadFact
SetAdditionalInfo(descriptor);
- return factory.CreateDepthToSpace(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::DepthToSpace, descriptor, PrepInfoAndDesc(descriptor));
}
DepthToSpaceLayer* DepthToSpaceLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index b1b95faa4f..db14e22b29 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -65,7 +65,7 @@ std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const IWo
SetAdditionalInfo(descriptor);
- return factory.CreateDepthwiseConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::DepthwiseConvolution2d, descriptor, PrepInfoAndDesc(descriptor));
}
DepthwiseConvolution2dLayer* DepthwiseConvolution2dLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
index 84c90104d0..afa0a7382a 100644
--- a/src/armnn/layers/DequantizeLayer.cpp
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -22,7 +22,7 @@ std::unique_ptr<IWorkload> DequantizeLayer::CreateWorkload(
DequantizeQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateDequantize(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Dequantize, descriptor, PrepInfoAndDesc(descriptor));
}
DequantizeLayer* DequantizeLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index 5a7d888457..833ef43597 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -26,7 +26,7 @@ std::unique_ptr<IWorkload> DetectionPostProcessLayer::CreateWorkload(const armnn
descriptor.m_Anchors = m_Anchors.get();
SetAdditionalInfo(descriptor);
- return factory.CreateDetectionPostProcess(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::DetectionPostProcess, descriptor, PrepInfoAndDesc(descriptor));
}
DetectionPostProcessLayer* DetectionPostProcessLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/DivisionLayer.cpp b/src/armnn/layers/DivisionLayer.cpp
index c65e191f04..c6faf41a84 100644
--- a/src/armnn/layers/DivisionLayer.cpp
+++ b/src/armnn/layers/DivisionLayer.cpp
@@ -24,7 +24,7 @@ std::unique_ptr<IWorkload> DivisionLayer::CreateWorkload(const IWorkloadFactory&
DivisionQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateDivision(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Division, descriptor, PrepInfoAndDesc(descriptor));
}
DivisionLayer* DivisionLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp
index 37d6084d5b..c50910bd32 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.cpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp
@@ -23,13 +23,7 @@ ElementwiseUnaryLayer::ElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& p
std::unique_ptr<IWorkload> ElementwiseUnaryLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
ElementwiseUnaryQueueDescriptor descriptor;
-
- if (descriptor.m_Parameters.m_Operation == UnaryOperation::LogicalNot)
- {
- return factory.CreateLogicalUnary(descriptor, PrepInfoAndDesc(descriptor));
- }
-
- return factory.CreateElementwiseUnary(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::ElementwiseUnary, descriptor, PrepInfoAndDesc(descriptor));
}
ElementwiseUnaryLayer* ElementwiseUnaryLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index 453891fa60..f375f9af43 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -23,7 +23,7 @@ std::unique_ptr<IWorkload> FakeQuantizationLayer::CreateWorkload(const IWorkload
FakeQuantizationQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateFakeQuantization(descriptor, PrepInfoAndDesc(descriptor) );
+ return factory.CreateWorkload(LayerType::FakeQuantization, descriptor, PrepInfoAndDesc(descriptor) );
}
FakeQuantizationLayer* FakeQuantizationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/FillLayer.cpp b/src/armnn/layers/FillLayer.cpp
index 9fb1bdaeaf..5004fabedf 100644
--- a/src/armnn/layers/FillLayer.cpp
+++ b/src/armnn/layers/FillLayer.cpp
@@ -23,7 +23,7 @@ std::unique_ptr<IWorkload> FillLayer::CreateWorkload(const IWorkloadFactory& fac
FillQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateFill(descriptor, PrepInfoAndDesc(descriptor) );
+ return factory.CreateWorkload(LayerType::Fill, descriptor, PrepInfoAndDesc(descriptor) );
}
FillLayer* FillLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp
index 5dbbc28ac3..616c118552 100644
--- a/src/armnn/layers/FloorLayer.cpp
+++ b/src/armnn/layers/FloorLayer.cpp
@@ -23,7 +23,7 @@ std::unique_ptr<IWorkload> FloorLayer::CreateWorkload(const IWorkloadFactory& fa
FloorQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateFloor(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Floor, descriptor, PrepInfoAndDesc(descriptor));
}
FloorLayer* FloorLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 261932a15c..6a9c3b07e4 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -32,7 +32,7 @@ std::unique_ptr<IWorkload> FullyConnectedLayer::CreateWorkload(const IWorkloadFa
}
SetAdditionalInfo(descriptor);
- return factory.CreateFullyConnected(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::FullyConnected, descriptor, PrepInfoAndDesc(descriptor));
}
FullyConnectedLayer* FullyConnectedLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp
index 9b34c12a4b..33d2088e69 100644
--- a/src/armnn/layers/GatherLayer.cpp
+++ b/src/armnn/layers/GatherLayer.cpp
@@ -23,7 +23,7 @@ std::unique_ptr<IWorkload> GatherLayer::CreateWorkload(const armnn::IWorkloadFac
GatherQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateGather(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Gather, descriptor, PrepInfoAndDesc(descriptor));
}
GatherLayer* GatherLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp
index 6d06c36404..44e98700c9 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.cpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.cpp
@@ -23,7 +23,7 @@ std::unique_ptr<IWorkload> InstanceNormalizationLayer::CreateWorkload(const IWor
InstanceNormalizationQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateInstanceNormalization(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::InstanceNormalization, descriptor, PrepInfoAndDesc(descriptor));
}
InstanceNormalizationLayer* InstanceNormalizationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp
index 7a01b073bf..0e0ae2e66f 100644
--- a/src/armnn/layers/L2NormalizationLayer.cpp
+++ b/src/armnn/layers/L2NormalizationLayer.cpp
@@ -23,7 +23,7 @@ std::unique_ptr<IWorkload> L2NormalizationLayer::CreateWorkload(const IWorkloadF
L2NormalizationQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateL2Normalization(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::L2Normalization, descriptor, PrepInfoAndDesc(descriptor));
}
L2NormalizationLayer* L2NormalizationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp
index e3632025a3..4f51a2baf8 100644
--- a/src/armnn/layers/LogSoftmaxLayer.cpp
+++ b/src/armnn/layers/LogSoftmaxLayer.cpp
@@ -23,7 +23,7 @@ std::unique_ptr<IWorkload> LogSoftmaxLayer::CreateWorkload(const IWorkloadFactor
LogSoftmaxQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateLogSoftmax(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::LogSoftmax, descriptor, PrepInfoAndDesc(descriptor));
}
LogSoftmaxLayer* LogSoftmaxLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/LogicalBinaryLayer.cpp b/src/armnn/layers/LogicalBinaryLayer.cpp
index cc0ed24827..1a20c988a4 100644
--- a/src/armnn/layers/LogicalBinaryLayer.cpp
+++ b/src/armnn/layers/LogicalBinaryLayer.cpp
@@ -23,7 +23,7 @@ LogicalBinaryLayer::LogicalBinaryLayer(const LogicalBinaryDescriptor& param, con
std::unique_ptr<IWorkload> LogicalBinaryLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
LogicalBinaryQueueDescriptor descriptor;
- return factory.CreateLogicalBinary(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::LogicalBinary, descriptor, PrepInfoAndDesc(descriptor));
}
LogicalBinaryLayer* LogicalBinaryLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 44da98687a..46c7574cf8 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -74,7 +74,7 @@ std::unique_ptr<IWorkload> LstmLayer::CreateWorkload(const IWorkloadFactory& fac
SetAdditionalInfo(descriptor);
- return factory.CreateLstm(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Lstm, descriptor, PrepInfoAndDesc(descriptor));
}
LstmLayer* LstmLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/MaximumLayer.cpp b/src/armnn/layers/MaximumLayer.cpp
index 077f83f34d..438c9be116 100644
--- a/src/armnn/layers/MaximumLayer.cpp
+++ b/src/armnn/layers/MaximumLayer.cpp
@@ -23,7 +23,7 @@ std::unique_ptr<IWorkload> MaximumLayer::CreateWorkload(const IWorkloadFactory&
MaximumQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateMaximum(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Maximum, descriptor, PrepInfoAndDesc(descriptor));
}
MaximumLayer* MaximumLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index b56905fe99..f695cc3735 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -28,7 +28,7 @@ std::unique_ptr<IWorkload> MeanLayer::CreateWorkload(const armnn::IWorkloadFacto
descriptor.m_Parameters.m_KeepDims = m_Param.m_KeepDims;
SetAdditionalInfo(descriptor);
- return factory.CreateMean(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Mean, descriptor, PrepInfoAndDesc(descriptor));
}
MeanLayer* MeanLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/MinimumLayer.cpp b/src/armnn/layers/MinimumLayer.cpp
index 43715acc14..894704132a 100644
--- a/src/armnn/layers/MinimumLayer.cpp
+++ b/src/armnn/layers/MinimumLayer.cpp
@@ -24,7 +24,7 @@ std::unique_ptr<IWorkload> MinimumLayer::CreateWorkload(const IWorkloadFactory&
MinimumQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateMinimum(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Minimum, descriptor, PrepInfoAndDesc(descriptor));
}
MinimumLayer* MinimumLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/MultiplicationLayer.cpp b/src/armnn/layers/MultiplicationLayer.cpp
index 05e266c793..36f2689506 100644
--- a/src/armnn/layers/MultiplicationLayer.cpp
+++ b/src/armnn/layers/MultiplicationLayer.cpp
@@ -24,7 +24,7 @@ std::unique_ptr<IWorkload> MultiplicationLayer::CreateWorkload(const IWorkloadFa
MultiplicationQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateMultiplication(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Multiplication, descriptor, PrepInfoAndDesc(descriptor));
}
MultiplicationLayer* MultiplicationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp
index e7b6de5de4..e42a7cf28e 100644
--- a/src/armnn/layers/NormalizationLayer.cpp
+++ b/src/armnn/layers/NormalizationLayer.cpp
@@ -23,7 +23,7 @@ std::unique_ptr<IWorkload> NormalizationLayer::CreateWorkload(const IWorkloadFac
NormalizationQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateNormalization(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Normalization, descriptor, PrepInfoAndDesc(descriptor));
}
NormalizationLayer* NormalizationLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index 667270a948..7900fa5a97 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -26,7 +26,7 @@ std::unique_ptr<IWorkload> PadLayer::CreateWorkload(const armnn::IWorkloadFactor
descriptor.m_Parameters.m_PaddingMode = m_Param.m_PaddingMode;
SetAdditionalInfo(descriptor);
- return factory.CreatePad(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Pad, descriptor, PrepInfoAndDesc(descriptor));
}
PadLayer* PadLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp
index 1a4d87b19a..e20eea6815 100644
--- a/src/armnn/layers/PermuteLayer.cpp
+++ b/src/armnn/layers/PermuteLayer.cpp
@@ -27,7 +27,7 @@ std::unique_ptr<IWorkload> PermuteLayer::CreateWorkload(const IWorkloadFactory&
PermuteQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreatePermute(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Permute, descriptor, PrepInfoAndDesc(descriptor));
}
PermuteLayer* PermuteLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index ce58012493..9fb055b27d 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -29,7 +29,7 @@ std::unique_ptr<IWorkload> Pooling2dLayer::CreateWorkload(const IWorkloadFactory
Pooling2dQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreatePooling2d(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Pooling2d, descriptor, PrepInfoAndDesc(descriptor));
}
Pooling2dLayer* Pooling2dLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/Pooling3dLayer.cpp b/src/armnn/layers/Pooling3dLayer.cpp
index 4c083f337c..046e146423 100644
--- a/src/armnn/layers/Pooling3dLayer.cpp
+++ b/src/armnn/layers/Pooling3dLayer.cpp
@@ -29,7 +29,7 @@ std::unique_ptr<IWorkload> Pooling3dLayer::CreateWorkload(const IWorkloadFactory
Pooling3dQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreatePooling3d(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Pooling3d, descriptor, PrepInfoAndDesc(descriptor));
}
Pooling3dLayer* Pooling3dLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index 80320e3ba2..ff2fa322e7 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -34,7 +34,7 @@ std::unique_ptr<IWorkload> PreCompiledLayer::CreateWorkload(const armnn::IWorklo
descriptor.m_PreCompiledObject = m_PreCompiledObject.get();
SetAdditionalInfo(descriptor);
- return factory.CreatePreCompiled(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::PreCompiled, descriptor, PrepInfoAndDesc(descriptor));
}
void PreCompiledLayer::ValidateTensorShapesFromInputs()
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index 8d88ed4829..431e2f4e38 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -25,7 +25,7 @@ std::unique_ptr<IWorkload> PreluLayer::CreateWorkload(const IWorkloadFactory& fa
PreluQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreatePrelu(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Prelu, descriptor, PrepInfoAndDesc(descriptor));
}
PreluLayer* PreluLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index a09aaeeb95..17031fa112 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -75,7 +75,7 @@ std::unique_ptr<IWorkload> QLstmLayer::CreateWorkload(const IWorkloadFactory& fa
SetAdditionalInfo(descriptor);
- return factory.CreateQLstm(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::QLstm, descriptor, PrepInfoAndDesc(descriptor));
}
QLstmLayer* QLstmLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp
index e37d6f5300..55f23bf251 100644
--- a/src/armnn/layers/QuantizeLayer.cpp
+++ b/src/armnn/layers/QuantizeLayer.cpp
@@ -23,7 +23,7 @@ std::unique_ptr<IWorkload> QuantizeLayer::CreateWorkload(const IWorkloadFactory&
WorkloadInfo info = PrepInfoAndDesc(descriptor);
- return factory.CreateQuantize(descriptor, info);
+ return factory.CreateWorkload(LayerType::Quantize, descriptor, info);
}
Layer* QuantizeLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index 6a09241645..7fd39f14b1 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -41,7 +41,7 @@ std::unique_ptr<IWorkload> QuantizedLstmLayer::CreateWorkload(const IWorkloadFac
SetAdditionalInfo(descriptor);
- return factory.CreateQuantizedLstm(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::QuantizedLstm, descriptor, PrepInfoAndDesc(descriptor));
}
QuantizedLstmLayer* QuantizedLstmLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp
index 17ca691c51..84d25bf756 100644
--- a/src/armnn/layers/RankLayer.cpp
+++ b/src/armnn/layers/RankLayer.cpp
@@ -22,7 +22,7 @@ std::unique_ptr<IWorkload> RankLayer::CreateWorkload(const IWorkloadFactory& fac
RankQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateRank(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Rank, descriptor, PrepInfoAndDesc(descriptor));
}
Layer* RankLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ReduceLayer.cpp b/src/armnn/layers/ReduceLayer.cpp
index b03ac72e50..1f4387b58c 100644
--- a/src/armnn/layers/ReduceLayer.cpp
+++ b/src/armnn/layers/ReduceLayer.cpp
@@ -27,7 +27,7 @@ std::unique_ptr<IWorkload> ReduceLayer::CreateWorkload(const IWorkloadFactory& f
descriptor.m_Parameters.m_ReduceOperation = m_Param.m_ReduceOperation;
SetAdditionalInfo(descriptor);
- return factory.CreateReduce(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Reduce, descriptor, PrepInfoAndDesc(descriptor));
}
ReduceLayer* ReduceLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index 571013d2e6..b194f7a48d 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -24,7 +24,7 @@ std::unique_ptr<IWorkload> ReshapeLayer::CreateWorkload(const IWorkloadFactory&
ReshapeQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateReshape(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Reshape, descriptor, PrepInfoAndDesc(descriptor));
}
ReshapeLayer* ReshapeLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
index 9c407c1af5..89a94f78d3 100644
--- a/src/armnn/layers/ResizeLayer.cpp
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -28,7 +28,7 @@ std::unique_ptr<IWorkload> ResizeLayer::CreateWorkload(const IWorkloadFactory& f
ResizeQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateResize(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Resize, descriptor, PrepInfoAndDesc(descriptor));
}
ResizeLayer* ResizeLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp
index adac012173..3a63b7c502 100644
--- a/src/armnn/layers/RsqrtLayer.cpp
+++ b/src/armnn/layers/RsqrtLayer.cpp
@@ -21,10 +21,11 @@ RsqrtLayer::RsqrtLayer(const char* name)
std::unique_ptr<IWorkload> RsqrtLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- RsqrtQueueDescriptor descriptor;
+ ElementwiseUnaryQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_Operation = UnaryOperation::Rsqrt;
SetAdditionalInfo(descriptor);
- return factory.CreateRsqrt(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::ElementwiseUnary, descriptor, PrepInfoAndDesc(descriptor));
}
RsqrtLayer* RsqrtLayer::Clone(Graph& graph) const
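RsqrtLayer no longer has a dedicated queue descriptor or factory hook: it describes itself as a generic elementwise-unary operation and lets the factory dispatch on LayerType::ElementwiseUnary. A condensed sketch of the resulting call path, using only types and calls visible in this patch (the helper function name is illustrative):

    // Illustrative helper: what creating an Rsqrt workload amounts to after this patch.
    std::unique_ptr<IWorkload> CreateRsqrtWorkloadSketch(const IWorkloadFactory& factory,
                                                         const WorkloadInfo& info)
    {
        ElementwiseUnaryQueueDescriptor descriptor;
        descriptor.m_Parameters.m_Operation = UnaryOperation::Rsqrt;   // operation carried as a parameter

        // IWorkloadFactory's default CreateWorkload (added later in this patch) downcasts the
        // descriptor and forwards to CreateElementwiseUnary, so no CreateRsqrt hook is needed.
        return factory.CreateWorkload(LayerType::ElementwiseUnary, descriptor, info);
    }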
diff --git a/src/armnn/layers/ShapeLayer.cpp b/src/armnn/layers/ShapeLayer.cpp
index 318f38c5a2..ecc112c02e 100644
--- a/src/armnn/layers/ShapeLayer.cpp
+++ b/src/armnn/layers/ShapeLayer.cpp
@@ -26,7 +26,7 @@ std::unique_ptr<IWorkload> ShapeLayer::CreateWorkload(const IWorkloadFactory& fa
ShapeQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateShape(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Shape, descriptor, PrepInfoAndDesc(descriptor));
}
ShapeLayer* ShapeLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index c87cab342b..0d61181c5d 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -26,7 +26,7 @@ std::unique_ptr<IWorkload> SliceLayer::CreateWorkload(const IWorkloadFactory& fa
SliceQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateSlice(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Slice, descriptor, PrepInfoAndDesc(descriptor));
}
SliceLayer* SliceLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp
index 3a6dfc4958..a2930e6035 100644
--- a/src/armnn/layers/SoftmaxLayer.cpp
+++ b/src/armnn/layers/SoftmaxLayer.cpp
@@ -23,7 +23,7 @@ std::unique_ptr<IWorkload> SoftmaxLayer::CreateWorkload(const IWorkloadFactory&
SoftmaxQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateSoftmax(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Softmax, descriptor, PrepInfoAndDesc(descriptor));
}
SoftmaxLayer* SoftmaxLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index e801925f58..a4c6d1b237 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -31,7 +31,7 @@ std::unique_ptr<IWorkload> SpaceToBatchNdLayer::CreateWorkload(const IWorkloadFa
descriptor.m_Parameters.m_PadList = m_Param.m_PadList;
SetAdditionalInfo(descriptor);
- return factory.CreateSpaceToBatchNd(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::SpaceToBatchNd, descriptor, PrepInfoAndDesc(descriptor));
}
SpaceToBatchNdLayer* SpaceToBatchNdLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index 612d940615..51d79f4d03 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -32,7 +32,7 @@ std::unique_ptr<IWorkload> SpaceToDepthLayer::CreateWorkload(const IWorkloadFact
SetAdditionalInfo(descriptor);
- return factory.CreateSpaceToDepth(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::SpaceToDepth, descriptor, PrepInfoAndDesc(descriptor));
}
SpaceToDepthLayer* SpaceToDepthLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 5e658cebb3..42cb6e1950 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -31,7 +31,7 @@ std::unique_ptr<IWorkload> SplitterLayer::CreateWorkload(const IWorkloadFactory&
SetAdditionalInfo(descriptor);
- return factory.CreateSplitter(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Splitter, descriptor, PrepInfoAndDesc(descriptor));
}
template<typename FactoryType>
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index 09f255bb2e..b842f1b4d5 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -24,7 +24,7 @@ std::unique_ptr<IWorkload> StackLayer::CreateWorkload(const IWorkloadFactory& fa
StackQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateStack(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Stack, descriptor, PrepInfoAndDesc(descriptor));
}
StackLayer* StackLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp
index e80ec2277d..56051c28ee 100644
--- a/src/armnn/layers/StridedSliceLayer.cpp
+++ b/src/armnn/layers/StridedSliceLayer.cpp
@@ -36,7 +36,7 @@ std::unique_ptr<IWorkload> StridedSliceLayer::CreateWorkload(const IWorkloadFact
SetAdditionalInfo(descriptor);
- return factory.CreateStridedSlice(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::StridedSlice, descriptor, PrepInfoAndDesc(descriptor));
}
StridedSliceLayer* StridedSliceLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SubtractionLayer.cpp b/src/armnn/layers/SubtractionLayer.cpp
index c5f9ca9a7e..8e9b1733b7 100644
--- a/src/armnn/layers/SubtractionLayer.cpp
+++ b/src/armnn/layers/SubtractionLayer.cpp
@@ -24,7 +24,7 @@ std::unique_ptr<IWorkload> SubtractionLayer::CreateWorkload(const IWorkloadFacto
SubtractionQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateSubtraction(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Subtraction, descriptor, PrepInfoAndDesc(descriptor));
}
SubtractionLayer* SubtractionLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp
index 810bd33911..afa4d52f9d 100644
--- a/src/armnn/layers/SwitchLayer.cpp
+++ b/src/armnn/layers/SwitchLayer.cpp
@@ -21,7 +21,7 @@ std::unique_ptr<IWorkload> SwitchLayer::CreateWorkload(const IWorkloadFactory& f
SwitchQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateSwitch(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Switch, descriptor, PrepInfoAndDesc(descriptor));
}
SwitchLayer* SwitchLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index 689a3f5be3..a1f07f9eca 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -37,7 +37,7 @@ std::unique_ptr<IWorkload> TransposeConvolution2dLayer::CreateWorkload(const IWo
SetAdditionalInfo(descriptor);
- return factory.CreateTransposeConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::TransposeConvolution2d, descriptor, PrepInfoAndDesc(descriptor));
}
TransposeConvolution2dLayer* TransposeConvolution2dLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp
index 3c34df9372..3340b9ddf9 100644
--- a/src/armnn/layers/TransposeLayer.cpp
+++ b/src/armnn/layers/TransposeLayer.cpp
@@ -27,7 +27,7 @@ std::unique_ptr<IWorkload> TransposeLayer::CreateWorkload(const IWorkloadFactory
TransposeQueueDescriptor descriptor;
SetAdditionalInfo(descriptor);
- return factory.CreateTranspose(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::Transpose, descriptor, PrepInfoAndDesc(descriptor));
}
TransposeLayer* TransposeLayer::Clone(Graph& graph) const
diff --git a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
index 911ba2e0ff..c9aaa8c171 100644
--- a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
+++ b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
@@ -74,7 +74,7 @@ std::unique_ptr<IWorkload> UnidirectionalSequenceLstmLayer::CreateWorkload(const
SetAdditionalInfo(descriptor);
- return factory.CreateUnidirectionalSequenceLstm(descriptor, PrepInfoAndDesc(descriptor));
+ return factory.CreateWorkload(LayerType::UnidirectionalSequenceLstm, descriptor, PrepInfoAndDesc(descriptor));
}
UnidirectionalSequenceLstmLayer* UnidirectionalSequenceLstmLayer::Clone(Graph& graph) const
diff --git a/src/backends/aclCommon/test/MemCopyTestImpl.hpp b/src/backends/aclCommon/test/MemCopyTestImpl.hpp
index db794b94da..956ea27c15 100644
--- a/src/backends/aclCommon/test/MemCopyTestImpl.hpp
+++ b/src/backends/aclCommon/test/MemCopyTestImpl.hpp
@@ -65,7 +65,7 @@ LayerTestResult<T, 4> MemCopyTest(armnn::IWorkloadFactory& srcWorkloadFactory,
AddInputToWorkload(memCopyQueueDesc, workloadInfo, tensorInfo, workloadInput.get());
AddOutputToWorkload(memCopyQueueDesc, workloadInfo, tensorInfo, workloadOutput.get());
- dstWorkloadFactory.CreateMemCopy(memCopyQueueDesc, workloadInfo)->Execute();
+ dstWorkloadFactory.CreateWorkload(armnn::LayerType::MemCopy, memCopyQueueDesc, workloadInfo)->Execute();
CopyDataFromITensorHandle(actualOutput.data(), workloadOutput.get());
ret.m_ActualData = actualOutput;
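The MemCopy test drives the new interface end to end: it builds the queue descriptor, asks the destination factory for a workload by LayerType, and executes it in a single expression. Because the unified CreateWorkload can return nullptr when a factory does not provide the requested workload (the default case added to WorkloadFactory.cpp later in this patch does exactly that), a slightly more defensive variant of the same call could split the expression. Purely illustrative, reusing the test's local variables, not part of the patch:

    auto memCopyWorkload = dstWorkloadFactory.CreateWorkload(armnn::LayerType::MemCopy,
                                                             memCopyQueueDesc,
                                                             workloadInfo);
    CHECK(memCopyWorkload != nullptr);   // guard against factories without MemCopy support
    memCopyWorkload->Execute();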
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 9c47a19208..56874a6a8c 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -1538,6 +1538,366 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
outReasonIfUnsupported,
modelOptions);
}
+ARMNN_NO_DEPRECATE_WARN_BEGIN
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateWorkload(LayerType type,
+ const QueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ switch(type)
+ {
+ case LayerType::Activation :
+ {
+ auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
+ return CreateActivation(*activationQueueDescriptor, info);
+ }
+ case LayerType::Addition :
+ {
+ auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
+ return CreateAddition(*additionQueueDescriptor, info);
+ }
+ case LayerType::ArgMinMax :
+ {
+ auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
+ return CreateArgMinMax(*argMinMaxQueueDescriptor, info);
+ }
+ case LayerType::BatchNormalization :
+ {
+ auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
+ return CreateBatchNormalization(*batchNormQueueDescriptor, info);
+ }
+ case LayerType::BatchToSpaceNd :
+ {
+ auto batchToSpaceNdQueueDescriptor
+ = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
+ return CreateBatchToSpaceNd(*batchToSpaceNdQueueDescriptor, info);
+ }
+ case LayerType::Cast :
+ {
+ auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
+ return CreateCast(*castQueueDescriptor, info);
+ }
+ case LayerType::ChannelShuffle :
+ {
+ auto channelShuffleQueueDescriptor
+ = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
+ return CreateChannelShuffle(*channelShuffleQueueDescriptor, info);
+ }
+ case LayerType::Comparison :
+ {
+ auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
+ return CreateComparison(*comparisonQueueDescriptor, info);
+ }
+ case LayerType::Concat :
+ {
+ auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
+ return CreateConcat(*concatQueueDescriptor, info);
+ }
+ case LayerType::Constant :
+ {
+ auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
+ return CreateConstant(*constantQueueDescriptor, info);
+ }
+ case LayerType::ConvertBf16ToFp32 :
+ {
+ auto convertBf16ToFp32QueueDescriptor
+ = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
+ return CreateConvertBf16ToFp32(*convertBf16ToFp32QueueDescriptor, info);
+ }
+ case LayerType::ConvertFp16ToFp32:
+ {
+ auto convertFp16ToFp32QueueDescriptor
+ = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
+ return CreateConvertFp16ToFp32(*convertFp16ToFp32QueueDescriptor, info);
+ }
+ case LayerType::ConvertFp32ToBf16:
+ {
+ auto convertFp32ToBf16QueueDescriptor
+ = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
+ return CreateConvertFp32ToBf16(*convertFp32ToBf16QueueDescriptor, info);
+ }
+ case LayerType::ConvertFp32ToFp16:
+ {
+ auto convertFp32ToFp16QueueDescriptor
+ = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
+ return CreateConvertFp32ToFp16(*convertFp32ToFp16QueueDescriptor, info);
+ }
+ case LayerType::Convolution2d:
+ {
+ auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
+ return CreateConvolution2d(*convolution2dQueueDescriptor, info);
+ }
+ case LayerType::Convolution3d:
+ {
+ auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
+ return CreateConvolution3d(*convolution3dQueueDescriptor, info);
+ }
+ case LayerType::Debug:
+ {
+ auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
+ return CreateDebug(*debugQueueDescriptor, info);
+ }
+ case LayerType::DepthToSpace:
+ {
+ auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
+ return CreateDepthToSpace(*depthToSpaceQueueDescriptor, info);
+ }
+ case LayerType::DepthwiseConvolution2d:
+ {
+ auto depthwiseConvolution2DQueueDescriptor
+ = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
+ return CreateDepthwiseConvolution2d(*depthwiseConvolution2DQueueDescriptor, info);
+ }
+ case LayerType::Dequantize:
+ {
+ auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
+ return CreateDequantize(*dequantizeQueueDescriptor, info);
+ }
+ case LayerType::DetectionPostProcess:
+ {
+ auto detectionPostProcessQueueDescriptor
+ = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
+ return CreateDetectionPostProcess(*detectionPostProcessQueueDescriptor, info);
+ }
+ case LayerType::Division:
+ {
+ auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
+ return CreateDivision(*divisionQueueDescriptor, info);
+ }
+ case LayerType::ElementwiseUnary:
+ {
+ auto elementwiseUnaryQueueDescriptor
+ = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
+ return CreateElementwiseUnary(*elementwiseUnaryQueueDescriptor, info);
+
+ }
+ case LayerType::FakeQuantization:
+ {
+ auto fakeQuantizationQueueDescriptor
+ = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
+ return CreateFakeQuantization(*fakeQuantizationQueueDescriptor, info);
+ }
+ case LayerType::Fill:
+ {
+ auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
+ return CreateFill(*fillQueueDescriptor, info);
+ }
+ case LayerType::Floor:
+ {
+ auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
+ return CreateFloor(*floorQueueDescriptor, info);
+ }
+ case LayerType::FullyConnected:
+ {
+ auto fullyConnectedQueueDescriptor
+ = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
+ return CreateFullyConnected(*fullyConnectedQueueDescriptor, info);
+ }
+ case LayerType::Gather:
+ {
+ auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
+ return CreateGather(*gatherQueueDescriptor, info);
+ }
+ case LayerType::Input:
+ {
+ auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
+ return CreateInput(*inputQueueDescriptor, info);
+ }
+ case LayerType::InstanceNormalization:
+ {
+ auto instanceNormalizationQueueDescriptor
+ = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
+ return CreateInstanceNormalization(*instanceNormalizationQueueDescriptor, info);
+ }
+ case LayerType::L2Normalization:
+ {
+ auto l2NormalizationQueueDescriptor
+ = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
+ return CreateL2Normalization(*l2NormalizationQueueDescriptor, info);
+ }
+ case LayerType::LogicalBinary:
+ {
+ auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
+ return CreateLogicalBinary(*logicalBinaryQueueDescriptor, info);
+ }
+ case LayerType::LogSoftmax:
+ {
+ auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
+ return CreateLogSoftmax(*logSoftmaxQueueDescriptor, info);
+ }
+ case LayerType::Lstm:
+ {
+ auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
+ return CreateLstm(*lstmQueueDescriptor, info);
+ }
+ case LayerType::Maximum:
+ {
+ auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
+ return CreateMaximum(*maximumQueueDescriptor, info);
+ }
+ case LayerType::Mean:
+ {
+ auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
+ return CreateMean(*meanQueueDescriptor, info);
+ }
+ case LayerType::MemCopy:
+ {
+ auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
+ return CreateMemCopy(*memCopyQueueDescriptor, info);
+ }
+ case LayerType::MemImport:
+ {
+ auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
+ return CreateMemImport(*memImportQueueDescriptor, info);
+ }
+ case LayerType::Minimum:
+ {
+ auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
+ return CreateMinimum(*minimumQueueDescriptor, info);
+ }
+ case LayerType::Multiplication:
+ {
+ auto multiplicationQueueDescriptor
+ = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
+ return CreateMultiplication(*multiplicationQueueDescriptor, info);
+ }
+ case LayerType::Normalization:
+ {
+ auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
+ return CreateNormalization(*normalizationQueueDescriptor, info);
+ }
+ case LayerType::Output:
+ {
+ auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
+ return CreateOutput(*outputQueueDescriptor, info);
+ }
+ case LayerType::Pad:
+ {
+ auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
+ return CreatePad(*padQueueDescriptor, info);
+ }
+ case LayerType::Permute:
+ {
+ auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
+ return CreatePermute(*permuteQueueDescriptor, info);
+ }
+ case LayerType::Pooling2d:
+ {
+ auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
+ return CreatePooling2d(*pooling2dQueueDescriptor, info);
+ }
+ case LayerType::Pooling3d:
+ {
+ auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
+ return CreatePooling3d(*pooling3dQueueDescriptor, info);
+ }
+ case LayerType::PreCompiled:
+ {
+ auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
+ return CreatePreCompiled(*preCompiledQueueDescriptor, info);
+ }
+ case LayerType::Prelu:
+ {
+ auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
+ return CreatePrelu(*preluQueueDescriptor, info);
+ }
+ case LayerType::QLstm:
+ {
+ auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
+ return CreateQLstm(*qlstmQueueDescriptor, info);
+ }
+ case LayerType::Quantize:
+ {
+ auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
+ return CreateQuantize(*quantizeQueueDescriptor, info);
+ }
+ case LayerType::Rank:
+ {
+ auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
+ return CreateRank(*rankQueueDescriptor, info);
+ }
+ case LayerType::Reduce:
+ {
+ auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
+ return CreateReduce(*reduceQueueDescriptor, info);
+ }
+ case LayerType::Reshape:
+ {
+ auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
+ return CreateReshape(*reshapeQueueDescriptor, info);
+ }
+ case LayerType::Resize:
+ {
+ auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
+ return CreateResize(*resizeQueueDescriptor, info);
+ }
+ case LayerType::Shape:
+ {
+ auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
+ return CreateShape(*shapeQueueDescriptor, info);
+ }
+ case LayerType::Slice:
+ {
+ auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
+ return CreateSlice(*sliceQueueDescriptor, info);
+ }
+ case LayerType::Softmax:
+ {
+ auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
+ return CreateSoftmax(*softmaxQueueDescriptor, info);
+ }
+ case LayerType::SpaceToBatchNd:
+ {
+ auto spaceToBatchNdQueueDescriptor
+ = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
+ return CreateSpaceToBatchNd(*spaceToBatchNdQueueDescriptor, info);
+ }
+ case LayerType::SpaceToDepth:
+ {
+ auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
+ return CreateSpaceToDepth(*spaceToDepthQueueDescriptor, info);
+ }
+ case LayerType::Splitter:
+ {
+ auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
+ return CreateSplitter(*splitterQueueDescriptor, info);
+ }
+ case LayerType::Stack:
+ {
+ auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
+ return CreateStack(*stackQueueDescriptor, info);
+ }
+ case LayerType::StridedSlice:
+ {
+ auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
+ return CreateStridedSlice(*stridedSliceQueueDescriptor, info);
+ }
+ case LayerType::Subtraction:
+ {
+ auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
+ return CreateSubtraction(*subtractionQueueDescriptor, info);
+ }
+ case LayerType::Transpose:
+ {
+ auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
+ return CreateTranspose(*transposeQueueDescriptor, info);
+ }
+ case LayerType::TransposeConvolution2d:
+ {
+ auto transposeConvolution2dQueueDescriptor
+ = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
+ return CreateTransposeConvolution2d(*transposeConvolution2dQueueDescriptor, info);
+ }
+ case LayerType::UnidirectionalSequenceLstm:
+ {
+ auto unidirectionalSequenceLstmQueueDescriptor
+ = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
+ return CreateUnidirectionalSequenceLstm(*unidirectionalSequenceLstmQueueDescriptor, info);
+ }
+ default:
+ return nullptr;
+ }
+}
+ARMNN_NO_DEPRECATE_WARN_END
std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const
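The switch above is the compatibility shim that stabilizes the interface: the generic QueueDescriptor is downcast back to its concrete type and forwarded to the old virtual Create* function, so existing backends keep working unmodified. A backend that wants to target the unified entry point directly could instead override CreateWorkload itself. A minimal hypothetical sketch, where MySampleWorkloadFactory and MyActivationWorkload are invented names and everything else appears in this patch:

    // Hypothetical backend adopting the unified entry point directly (armnn namespace assumed).
    std::unique_ptr<IWorkload> MySampleWorkloadFactory::CreateWorkload(LayerType type,
                                                                       const QueueDescriptor& descriptor,
                                                                       const WorkloadInfo& info) const
    {
        if (type == LayerType::Activation)
        {
            auto activationDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return std::make_unique<MyActivationWorkload>(*activationDescriptor, info); // hypothetical workload
        }
        return nullptr;   // unsupported types follow the same convention as the default case above
    }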
diff --git a/src/backends/backendsCommon/WorkloadFactoryBase.hpp b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
index 42a506afe0..00e549c933 100644
--- a/src/backends/backendsCommon/WorkloadFactoryBase.hpp
+++ b/src/backends/backendsCommon/WorkloadFactoryBase.hpp
@@ -34,6 +34,11 @@ public:
const bool /*IsMemoryManaged*/) const override
{ return nullptr; }
+ std::unique_ptr<IWorkload> CreateWorkload(LayerType /*type*/,
+ const QueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
+ { return nullptr; }
+
std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const override
{ return nullptr; }
@@ -105,19 +110,14 @@ public:
std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
const WorkloadInfo& info) const override
{
- if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs)
+ if (descriptor.m_Parameters.m_Operation == UnaryOperation::LogicalNot)
{
- { return nullptr; }
+ return CreateWorkload(armnn::LayerType::ElementwiseUnary, descriptor, info);
}
- else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt)
+ else
{
{ return nullptr; }
}
- else if (descriptor.m_Parameters.m_Operation == UnaryOperation::LogicalNot)
- {
- return CreateLogicalUnary(descriptor, info);
- }
- { return nullptr; }
}
std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*descriptor*/,
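In this test base class only the LogicalNot path changes behaviourally: it previously forwarded to CreateLogicalUnary and now goes back through the unified CreateWorkload, while Abs and Rsqrt keep returning nullptr, just via the shared else branch. For a derived test factory that means overriding CreateWorkload alone is enough to intercept LogicalNot requests; a small illustrative helper, not part of the patch:

    // A LogicalNot request made against a factory derived from WorkloadFactoryBase is forwarded
    // to the virtual CreateWorkload, so a derived override of that one method receives it.
    std::unique_ptr<IWorkload> CreateLogicalNotSketch(const WorkloadFactoryBase& factory,
                                                      const WorkloadInfo& info)
    {
        ElementwiseUnaryQueueDescriptor descriptor;
        descriptor.m_Parameters.m_Operation = UnaryOperation::LogicalNot;
        return factory.CreateElementwiseUnary(descriptor, info);
    }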
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index 1782fa44f6..6c37d18042 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -1474,7 +1474,9 @@ void CreateReferenceDynamicBackendTestImpl()
convolution2dQueueDescriptor.m_Weight = weights.get();
// Create a convolution workload with the dummy settings
- auto workload = referenceWorkloadFactory->CreateConvolution2d(convolution2dQueueDescriptor, workloadInfo);
+ auto workload = referenceWorkloadFactory->CreateWorkload(LayerType::Convolution2d,
+ convolution2dQueueDescriptor,
+ workloadInfo);
CHECK((workload != nullptr));
CHECK(workload.get() == PolymorphicDowncast<RefConvolution2dWorkload*>(workload.get()));
}
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index bc5e335a93..06f3eb561e 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -549,7 +549,7 @@ struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
QueueDesc desc; \
armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
- return factory->Create##name(desc, info); \
+ return factory->CreateWorkload(armnn::LayerType::name, desc, info); \
} \
};
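The LayerTypePolicy macro in this test helper now funnels every dummy workload through the unified call as well. Since the relevant part is buried in backslash continuations, this is roughly what the policy body expands to for a single layer, taking Activation as the example; the surrounding struct name, function signature, and parameter types are assumed from context rather than shown in this hunk:

    // Approximate expansion for name = Activation, QueueDesc = ActivationQueueDescriptor (sketch only).
    template<armnn::DataType DataType>
    struct LayerTypePolicyActivationSketch
    {
        static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory,
                                                                   unsigned int nIn, unsigned int nOut)
        {
            armnn::ActivationQueueDescriptor desc;
            armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut);
            return factory->CreateWorkload(armnn::LayerType::Activation, desc, info);
        }
    };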
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
index 119f8744d3..c715d28ebe 100644
--- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
+++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
@@ -50,7 +50,8 @@ TEST_CASE("QueueDescriptor_Validate_WrongNumOfInputsOutputs")
InputQueueDescriptor invalidData;
WorkloadInfo invalidInfo;
//Invalid argument exception is expected, because no inputs and no outputs were defined.
- CHECK_THROWS_AS(RefWorkloadFactory().CreateInput(invalidData, invalidInfo), armnn::InvalidArgumentException);
+ CHECK_THROWS_AS(RefWorkloadFactory().CreateWorkload(LayerType::Input, invalidData, invalidInfo),
+ armnn::InvalidArgumentException);
}
TEST_CASE("RefPooling2dFloat32Workload_Validate_WrongDimTensor")
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 57687051b6..ea9973b7d3 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -71,7 +71,8 @@ LayerTestResult<T, 4> BoundedReLuTestCommon(
descriptor.m_Parameters.m_A = upperBound;
descriptor.m_Parameters.m_B = lowerBound;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
+ descriptor, workloadInfo);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -269,7 +270,8 @@ std::vector<float> BoundedReLuRandomInputTest(
AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
descriptor.m_Parameters = activationDescriptor;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
+ descriptor, workloadInfo);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -352,7 +354,8 @@ LayerTestResult<T, 4> ConstantLinearActivationTestCommon(
data.m_Parameters.m_B = 0.0f;
data.m_Parameters.m_Function = armnn::ActivationFunction::Linear;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
+ data, info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -458,7 +461,8 @@ LayerTestResult<T, 4> SimpleActivationTest(
descriptor.m_Parameters.m_A = activationParameterA;
descriptor.m_Parameters.m_B = activationParameterB;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
+ descriptor, workloadInfo);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -875,7 +879,8 @@ LayerTestResult<float, 5> SqrtNNTest(
descriptor.m_Parameters.m_Function = armnn::ActivationFunction::Sqrt;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
+ descriptor, workloadInfo);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -1278,9 +1283,11 @@ LayerTestResult<T, 4> CompareActivationTestImpl(
SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
+ data, info);
ARMNN_ASSERT(workload != nullptr);
- std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
+ std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateWorkload(armnn::LayerType::Activation,
+ refData, refInfo);
ARMNN_ASSERT(workloadRef != nullptr);
inputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
index ce8f74d2e0..56906bc2a1 100644
--- a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
@@ -16,7 +16,7 @@ std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>
const armnn::WorkloadInfo& info,
const armnn::AdditionQueueDescriptor& descriptor)
{
- return workloadFactory.CreateAddition(descriptor, info);
+ return workloadFactory.CreateWorkload(armnn::LayerType::Addition, descriptor, info);
}
LayerTestResult<float,4> AdditionTest(
@@ -231,7 +231,8 @@ LayerTestResult<T, 4> AdditionBroadcastTestImpl(
AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Addition,
+ data, info);
inputHandle1->Allocate();
inputHandle2->Allocate();
@@ -314,7 +315,8 @@ LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Addition,
+ data, info);
inputHandle1->Allocate();
inputHandle2->Allocate();
@@ -580,7 +582,9 @@ LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
// Create the MaxPool
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pooling2d,
+ queueDescriptor,
+ workloadInfo);
std::vector<float> resultMaxPool(poolingOutputTensorInfo.GetNumElements());
@@ -611,7 +615,8 @@ LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
- std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
+ std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateWorkload(armnn::LayerType::Addition,
+ data, info);
poolingInputHandle->Allocate();
poolingOutputHandle->Allocate();
@@ -685,8 +690,10 @@ LayerTestResult<float,4> CompareAdditionTest(
SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
- std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Addition,
+ data, info);
+ std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateWorkload(armnn::LayerType::Addition,
+ refData, refInfo);
inputHandle1->Allocate();
inputHandle2->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
index e98708fd02..88d92d22f1 100644
--- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
@@ -41,7 +41,8 @@ LayerTestResult<int32_t, 3> ArgMinMaxTestCommon(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateArgMinMax(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ArgMinMax,
+ descriptor, info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
index b4414050e7..928918c83a 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
@@ -94,7 +94,8 @@ LayerTestResult<T, 4> BatchNormTestImpl(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::BatchNormalization, descriptor, info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -192,7 +193,8 @@ LayerTestResult<T,4> BatchNormTestNhwcImpl(
},
qScale, qOffset);
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::BatchNormalization, data, info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -672,8 +674,10 @@ LayerTestResult<float,4> CompareBatchNormTest(
SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
- std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::BatchNormalization, data, info);
+ std::unique_ptr<armnn::IWorkload> workloadRef
+ = refWorkloadFactory.CreateWorkload(armnn::LayerType::BatchNormalization, refData, refInfo);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
index 23e790f15a..a5fdfa64d0 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
@@ -64,7 +64,8 @@ LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::BatchToSpaceNd,
+ data, info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp
index 92bce4f460..8d60cf1b0f 100644
--- a/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/CastTestImpl.cpp
@@ -41,7 +41,7 @@ LayerTestResult<TOutput, 4> CastTest(armnn::IWorkloadFactory& workloadFactory,
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateCast(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Cast, data, info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp
index 08a68cc04c..74b7bc3b07 100644
--- a/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp
@@ -35,7 +35,8 @@ LayerTestResult<T, NumDims> ChannelShuffleTestImpl(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateChannelShuffle(data, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::ChannelShuffle, data, info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index 2640c329b3..016d27897e 100644
--- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
@@ -66,7 +66,8 @@ LayerTestResult<uint8_t, NumDims> ComparisonTestImpl(
AddInputToWorkload(qDescriptor, info, inputTensorInfo1, inputHandle1.get());
AddOutputToWorkload(qDescriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateComparison(qDescriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::Comparison, qDescriptor, info);
inputHandle0->Allocate();
inputHandle1->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index 8fbd3e39d8..88a392cf18 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -147,7 +147,9 @@ template<typename T> void PermuteTensorData(
AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
- std::unique_ptr<IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
+ std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Permute,
+ queueDescriptor,
+ workloadInfo);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -379,7 +381,8 @@ template<typename T> void Concatenate(
AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
- std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
+ std::unique_ptr<IWorkload> workload
+ = workloadFactory.CreateWorkload(LayerType::Concat, queueDescriptor, workloadInfo);
for (auto& inputHandle : inputHandles)
{
@@ -2069,7 +2072,7 @@ LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
data.m_ViewOrigins.push_back(window1);
data.m_ViewOrigins.push_back(window2);
- std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
+ std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
inputHandle1->Allocate();
inputHandle2->Allocate();
@@ -2217,7 +2220,7 @@ LayerTestResult<float,3> ConcatTest(
data.m_ViewOrigins.push_back(window1);
data.m_ViewOrigins.push_back(window2);
- std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
+ std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
inputHandle1->Allocate();
inputHandle2->Allocate();
@@ -2549,7 +2552,7 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
data.m_ViewOrigins.push_back(window1);
data.m_ViewOrigins.push_back(window2);
- std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
+ std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
inputHandle1->Allocate();
inputHandle2->Allocate();
@@ -2687,7 +2690,7 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
data.m_ViewOrigins.push_back(window1);
data.m_ViewOrigins.push_back(window2);
- std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
+ std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
inputHandle1->Allocate();
inputHandle2->Allocate();
@@ -2826,7 +2829,7 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
data.m_ViewOrigins.push_back(window1);
data.m_ViewOrigins.push_back(window2);
- std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
+ std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::Concat, data, info);
inputHandle1->Allocate();
inputHandle2->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
index f7d84157ad..c9da74985e 100644
--- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
@@ -108,7 +108,9 @@ LayerTestResult<T, 4> ConstantTestImpl(
armnn::WorkloadInfo info;
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Constant,
+ descriptor,
+ info);
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 0982d01e2e..33dfc23b7d 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -344,7 +344,9 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
data.m_Parameters.m_DilationX = dilationX;
data.m_Parameters.m_DilationY = dilationY;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
+ data,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -444,7 +446,9 @@ LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
+ data,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -570,7 +574,9 @@ LayerTestResult<T,4> Convolution1dTestImpl(
data.m_Parameters.m_PadBottom = padSize;
data.m_Parameters.m_BiasEnabled = biasEnabled;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
+ data,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -1388,8 +1394,10 @@ LayerTestResult<T,4> CompareConvolution2dTestImpl(
SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
- std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateConvolution2d(refData, refInfo);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d, data, info);
+ std::unique_ptr<armnn::IWorkload> workloadRef
+ = refWorkloadFactory.CreateWorkload(armnn::LayerType::Convolution2d, refData, refInfo);
outputHandleRef->Allocate();
inputHandleRef->Allocate();
@@ -1756,7 +1764,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -1905,7 +1914,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
data.m_Parameters.m_BiasEnabled = biasEnabled;
data.m_Parameters.m_DataLayout = layout;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -2100,7 +2110,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
data.m_Parameters.m_BiasEnabled = biasEnabled;
data.m_Parameters.m_DataLayout = layout;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -2266,7 +2277,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
data.m_Parameters.m_DilationX = dilationX;
data.m_Parameters.m_DilationY = dilationY;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -2989,8 +3001,10 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
- std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateDepthwiseConvolution2d(refData, refInfo);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
+ std::unique_ptr<armnn::IWorkload> workloadRef
+ = refWorkloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, refData, refInfo);
outputHandleRef->Allocate();
inputHandleRef->Allocate();
@@ -3474,7 +3488,9 @@ LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
- std::unique_ptr<IWorkload> workload = workloadFactory.CreateConvolution2d(queueDescriptor, workloadInfo);
+ std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
+ queueDescriptor,
+ workloadInfo);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -3740,7 +3756,9 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
- std::unique_ptr<IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(queueDescriptor, workloadInfo);
+ std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d,
+ queueDescriptor,
+ workloadInfo);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
index 45cf48b40e..406fcb4700 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
@@ -275,7 +275,9 @@ LayerTestResult<T, 5> SimpleConvolution3dTestImpl(
AddInputToWorkload(data, info, biasDesc, input2Handle.get());
}
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution3d(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution3d,
+ data,
+ info);
input0Handle->Allocate();
input1Handle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
index e4ba0b8c0b..49e1cebc0f 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
@@ -42,7 +42,9 @@ LayerTestResult<float, 4> ConvertBf16ToFp32Test(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertBf16ToFp32(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertBf16ToFp32,
+ data,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
index 74c03d939b..d581032838 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
@@ -46,7 +46,9 @@ LayerTestResult<float, 4> SimpleConvertFp16ToFp32Test(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertFp16ToFp32(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertFp16ToFp32,
+ data,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
index 667fd29c24..61a55f26c2 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
@@ -63,7 +63,9 @@ LayerTestResult<armnn::BFloat16, 4> ConvertFp32ToBf16Test(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertFp32ToBf16(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertFp32ToBf16,
+ data,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
index 13dde067b0..060e7a2762 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
@@ -44,7 +44,9 @@ LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertFp32ToFp16(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertFp32ToFp16,
+ data,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
index 2938489d63..d2cbdd1412 100644
--- a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
@@ -56,7 +56,9 @@ LayerTestResult<T, Dim> DebugTestImpl(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDebug(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Debug,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
index ee4f1b3660..6476e7b0da 100644
--- a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
@@ -58,7 +58,9 @@ LayerTestResult<T, 4> DepthToSpaceTestImpl(
AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthToSpace(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::DepthToSpace,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
index 98bf74f833..e8fabea3ba 100644
--- a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
@@ -39,7 +39,9 @@ LayerTestResult<T1, Dim> DequantizeTestImpl(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDequantize(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Dequantize,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
index 52aacad38c..43e7d15158 100644
--- a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
@@ -200,7 +200,9 @@ void DetectionPostProcessImpl(const armnn::TensorInfo& boxEncodingsInfo,
AddOutputToWorkload(data, info, detectionScoresInfo, outputScoresHandle.get());
AddOutputToWorkload(data, info, numDetectionInfo, numDetectionHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDetectionPostProcess(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::DetectionPostProcess,
+ data,
+ info);
boxedHandle->Allocate();
scoreshandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
index f3688347d2..e355ec69c1 100644
--- a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
@@ -13,7 +13,7 @@ std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::DivisionQueueDescriptor>
const armnn::WorkloadInfo& info,
const armnn::DivisionQueueDescriptor& descriptor)
{
- return workloadFactory.CreateDivision(descriptor, info);
+ return workloadFactory.CreateWorkload(armnn::LayerType::Division, descriptor, info);
}
LayerTestResult<float, 4> DivisionByZeroTest(
diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp
index a2c88a62e7..c821e83835 100644
--- a/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ElementwiseUnaryTestImpl.cpp
@@ -10,5 +10,5 @@ std::unique_ptr<armnn::IWorkload> CreateWorkload(
const armnn::WorkloadInfo& info,
const armnn::ElementwiseUnaryQueueDescriptor& descriptor)
{
- return workloadFactory.CreateElementwiseUnary(descriptor, info);
+ return workloadFactory.CreateWorkload(armnn::LayerType::ElementwiseUnary, descriptor, info);
} \ No newline at end of file
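Across all of these test implementations the change is mechanical: the per-layer factory call is replaced by the single CreateWorkload overload, parameterised by armnn::LayerType. A minimal sketch of the resulting call pattern, assuming a queue descriptor `data` and a WorkloadInfo `info` have already been populated as in the hunks above (the layer type shown is only an example):

    // Unified factory call: the LayerType enum selects which workload to build;
    // the descriptor and workload info are passed through unchanged.
    std::unique_ptr<armnn::IWorkload> workload =
        workloadFactory.CreateWorkload(armnn::LayerType::ElementwiseUnary, data, info);
    workload->Execute();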
diff --git a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
index 613f8b7f6a..e2fb6fa373 100644
--- a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
@@ -54,7 +54,9 @@ LayerTestResult<float, 2> FakeQuantizationTest(
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::FakeQuantization,
+ data,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp
index fae2691af7..1be0e40871 100644
--- a/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FillTestImpl.cpp
@@ -40,7 +40,7 @@ LayerTestResult<T, 4> SimpleFillTest(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFill(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Fill, data, info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
index 527b0dbd13..fbd962584a 100644
--- a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
@@ -47,7 +47,7 @@ LayerTestResult<T, 4> SimpleFloorTest(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFloor(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Floor, data, info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index 9361f4d2b3..71d2d0a81e 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -69,7 +69,9 @@ LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
AddInputToWorkload(data, info, biasesTensorInfo, input2Handle.get());
}
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::FullyConnected,
+ data,
+ info);
LayerTestResult<T, 2> result(outputTensorInfo);
input0Handle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
index ad81968b71..c89d53be66 100644
--- a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
@@ -46,7 +46,7 @@ LayerTestResult<T, OutputDim> GatherTestImpl(
AddInputToWorkload(data, info, indicesInfo, indicesHandle.get());
AddOutputToWorkload(data, info, outputInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateGather(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Gather, data, info);
paramsHandle->Allocate();
indicesHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
index 9de7df7029..bebbedad87 100644
--- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
@@ -48,7 +48,8 @@ LayerTestResult<T, 4> InstanceNormTestImpl(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateInstanceNormalization(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::InstanceNormalization, descriptor, info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
index f7566fd014..0a60658b47 100644
--- a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
@@ -79,7 +79,9 @@ LayerTestResult<T, 4> L2NormalizationTestImpl(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::L2Normalization,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -740,7 +742,9 @@ LayerTestResult<float, 2> L2Normalization2dShapeTest(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::L2Normalization,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
index 016d143e35..cb182d6d3f 100644
--- a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
@@ -52,7 +52,9 @@ LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLogSoftmax(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::LogSoftmax,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
index d2fa2bd6f7..60e14b5d9d 100644
--- a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
@@ -48,7 +48,7 @@ LayerTestResult<uint8_t, NumDims> LogicalUnaryTestHelper(
AddInputToWorkload(qDesc, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get());
- auto workload = workloadFactory.CreateElementwiseUnary(qDesc, info);
+ auto workload = workloadFactory.CreateWorkload(armnn::LayerType::ElementwiseUnary, qDesc, info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -104,7 +104,7 @@ LayerTestResult<uint8_t, NumDims> LogicalBinaryTestHelper(
AddInputToWorkload(qDesc, info, inputTensorInfo1, inputHandle1.get());
AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get());
- auto workload = workloadFactory.CreateLogicalBinary(qDesc, info);
+ auto workload = workloadFactory.CreateWorkload(armnn::LayerType::LogicalBinary, qDesc, info);
inputHandle0->Allocate();
inputHandle1->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index c04e97bb0f..a69f7270b4 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -314,7 +314,7 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl(
data.m_Parameters.m_PeepholeEnabled = false;
data.m_Parameters.m_ProjectionEnabled = false;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Lstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
@@ -987,7 +987,7 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
data.m_Parameters.m_PeepholeEnabled = true;
data.m_Parameters.m_ProjectionEnabled = true;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Lstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
@@ -1211,7 +1211,7 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Lstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
@@ -1464,7 +1464,7 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF
data.m_Parameters.m_LayerNormEnabled = true;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Lstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
@@ -1653,7 +1653,9 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
data.m_OutputGateBias = &outputGateBiasTensor;
// Create workload and allocate tensor handles
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantizedLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::QuantizedLstm,
+ data,
+ info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
@@ -1890,7 +1892,7 @@ LayerTestResult<int8_t, 2> QLstmTestImpl(
data.m_Parameters.m_ProjectionClip = projectionClip;
// Create workload and allocate tensor handles
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::QLstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
@@ -2155,7 +2157,7 @@ LayerTestResult<int8_t, 2> QLstmTestImpl1(
data.m_Parameters.m_ProjectionClip = projectionClip;
// Create workload and allocate tensor handles
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::QLstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
@@ -2406,7 +2408,7 @@ LayerTestResult<int8_t, 2> QLstmTestImpl2(
data.m_Parameters.m_ProjectionClip = projectionClip;
// Create workload and allocate tensor handles
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::QLstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
index 1ef8f9a4e9..c2c7cd520a 100644
--- a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
@@ -13,7 +13,7 @@ std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
const armnn::WorkloadInfo& info,
const armnn::MaximumQueueDescriptor& descriptor)
{
- return workloadFactory.CreateMaximum(descriptor, info);
+ return workloadFactory.CreateWorkload(armnn::LayerType::Maximum, descriptor, info);
}
LayerTestResult<float, 4> MaximumSimpleTest(
diff --git a/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
index 29dba2f152..eacb4e0f1b 100644
--- a/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
@@ -55,7 +55,7 @@ LayerTestResult<T, OutputDim> MeanTestHelper(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Mean, data, info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
index 015d055dcc..ff31d27774 100644
--- a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
@@ -13,7 +13,7 @@ std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
const armnn::WorkloadInfo& info,
const armnn::MinimumQueueDescriptor& descriptor)
{
- return workloadFactory.CreateMinimum(descriptor, info);
+ return workloadFactory.CreateWorkload(armnn::LayerType::Minimum, descriptor, info);
}
LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
diff --git a/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp
index 2647bb976f..d99e5c8fbd 100644
--- a/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp
@@ -43,7 +43,9 @@ LayerTestResult<T, 2> MirrorPad2dTestCommon(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -86,7 +88,9 @@ LayerTestResult<T, 3> MirrorPad3dTestCommon(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -129,7 +133,9 @@ LayerTestResult<T, 4> MirrorPad4dTestCommon(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
index c7b082183c..eab24810d5 100644
--- a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
@@ -14,7 +14,7 @@ std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MultiplicationQueueDescr
const armnn::WorkloadInfo& info,
const armnn::MultiplicationQueueDescriptor& descriptor)
{
- return workloadFactory.CreateMultiplication(descriptor, info);
+ return workloadFactory.CreateWorkload(armnn::LayerType::Multiplication, descriptor, info);
}
LayerTestResult<float, 4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
@@ -571,8 +571,10 @@ LayerTestResult<float,4> CompareMultiplicationTest(
SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
- std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::Multiplication, data, info);
+ std::unique_ptr<armnn::IWorkload> workloadRef
+ = refWorkloadFactory.CreateWorkload(armnn::LayerType::Multiplication, refData, refInfo);
inputHandle0->Allocate();
inputHandle1->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
index 4f2add51ff..2b2ff0cc14 100644
--- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
@@ -82,7 +82,9 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Normalization,
+ data,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -237,7 +239,9 @@ LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Normalization,
+ data,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -355,8 +359,10 @@ LayerTestResult<float,4> CompareNormalizationTestImpl(
return ret;
}
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
- std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateNormalization(refData, refInfo);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::Normalization, data, info);
+ std::unique_ptr<armnn::IWorkload> workloadRef
+ = refWorkloadFactory.CreateWorkload(armnn::LayerType::Normalization, refData, refInfo);
outputHandleRef->Allocate();
inputHandleRef->Allocate();
@@ -438,7 +444,9 @@ LayerTestResult<float,4> AcrossChannelNormalizationTestImpl(
armnn::WorkloadInfo refInfo = info;
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Normalization,
+ data,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
index ab2ef24a2b..b1d8c31dfc 100644
--- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
@@ -72,7 +72,9 @@ LayerTestResult<T, 2> Pad2dTestCommon(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -157,7 +159,9 @@ LayerTestResult<T, 3> Pad3dTestCommon(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -397,7 +401,9 @@ LayerTestResult<T, 4> Pad4dTestCommon(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -470,7 +476,9 @@ LayerTestResult<T, 2> PadQAsymmTestCommon(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pad,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
index fb55f08c54..bffa3d47a1 100644
--- a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
@@ -39,7 +39,7 @@ LayerTestResult<T, 4> SimplePermuteTestImpl(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Permute, data, info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
index 248f9726a1..643a5df6be 100644
--- a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
@@ -100,7 +100,9 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(
return result;
}
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pooling2d,
+ queueDescriptor,
+ workloadInfo);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -830,8 +832,10 @@ LayerTestResult<T, 4> ComparePooling2dTestCommon(
SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
- std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::Pooling2d, data, info);
+ std::unique_ptr<armnn::IWorkload> workloadRef
+ = refWorkloadFactory.CreateWorkload(armnn::LayerType::Pooling2d, refData, refInfo);
outputHandleRef->Allocate();
inputHandleRef->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.cpp
index 3befc7c585..013ed03f08 100644
--- a/src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Pooling3dTestImpl.cpp
@@ -104,7 +104,9 @@ LayerTestResult<T, 5> SimplePooling3dTestImpl(
return result;
}
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling3d(queueDescriptor, workloadInfo);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pooling3d,
+ queueDescriptor,
+ workloadInfo);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -1035,8 +1037,10 @@ LayerTestResult<T, 5> ComparePooling3dTestCommon(
SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling3d(data, info);
- std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling3d(refData, refInfo);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::Pooling3d, data, info);
+ std::unique_ptr<armnn::IWorkload> workloadRef
+ = refWorkloadFactory.CreateWorkload(armnn::LayerType::Pooling3d, refData, refInfo);
outputHandleRef->Allocate();
inputHandleRef->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
index a74be62b66..02452452a3 100644
--- a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
@@ -84,7 +84,9 @@ LayerTestResult<T, 4> PreluTest(
AddInputToWorkload (descriptor, info, alphaTensorInfo, alphaHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePrelu(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Prelu,
+ descriptor,
+ info);
inputHandle->Allocate();
alphaHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
index 5878ec8d9b..38d6fbaac1 100644
--- a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
@@ -40,7 +40,9 @@ LayerTestResult<T, Dim> QuantizeTestImpl(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantize(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Quantize,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp
index ef45eec8c5..be22fc6eed 100644
--- a/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp
@@ -35,7 +35,7 @@ LayerTestResult<int32_t, 1> RankTest(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRank(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Rank, data, info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp
index a8b1fda8dc..9506f36bd4 100644
--- a/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp
@@ -59,7 +59,9 @@ LayerTestResult<float, 4> ReduceTestCommon(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReduce(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Reduce,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp
index acb2990c98..c8a61bb42d 100644
--- a/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReduceSumTestImpl.cpp
@@ -59,7 +59,9 @@ LayerTestResult<float, 4> ReduceTestCommon(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReduce(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Reduce,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp
index 47cb1d730a..4edf3dd689 100644
--- a/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReductionTestImpl.cpp
@@ -61,7 +61,9 @@ LayerTestResult<float, 4> ReductionTestCommon(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReduce(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Reduce,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
index ae9280b10f..217d8e9d4b 100644
--- a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
@@ -35,7 +35,7 @@ LayerTestResult<T, NumDims> SimpleReshapeTestImpl(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReshape(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Reshape, data, info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
index 1773809d70..2e8cc66c09 100644
--- a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
@@ -123,7 +123,9 @@ LayerTestResult<T, NumDims> ResizeTestImpl(
AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Resize,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ShapeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ShapeTestImpl.cpp
index 5aa3b7ca24..e30912c9ed 100644
--- a/src/backends/backendsCommon/test/layerTests/ShapeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ShapeTestImpl.cpp
@@ -33,7 +33,7 @@ LayerTestResult<int32_t, 1> ShapeTest(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateShape(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Shape, data, info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
index 32abf35576..95d2320508 100644
--- a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
@@ -52,7 +52,9 @@ LayerTestResult<T, NumDims> SliceTestImpl(
AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSlice(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Slice,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index e8b4ee5af3..1956533611 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -98,7 +98,7 @@ LayerTestResult<T, n> SimpleSoftmaxBaseTestImpl(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Softmax, data, info);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -283,8 +283,10 @@ LayerTestResult<T, 2> CompareSoftmaxTestImpl(
SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
- std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateSoftmax(refData, refInfo);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Softmax, data, info);
+ std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateWorkload(armnn::LayerType::Softmax,
+ refData,
+ refInfo);
outputHandleRef->Allocate();
inputHandleRef->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
index fe6aa70b87..103e3363ba 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
@@ -69,7 +69,9 @@ LayerTestResult<T, 4> SpaceToBatchNdTestImpl(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSpaceToBatchNd(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::SpaceToBatchNd,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
index 0080bb6243..f67ed9bcd9 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
@@ -70,7 +70,9 @@ LayerTestResult<T, 4> SpaceToDepthTestImpl(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSpaceToDepth(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::SpaceToDepth,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
index cfb6a1f9c7..f2ee7bff3d 100644
--- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
@@ -207,7 +207,9 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
data.m_ViewOrigins.push_back(window1);
data.m_ViewOrigins.push_back(window2);
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Splitter,
+ data,
+ info);
inputHandle->Allocate();
outputHandle1->Allocate();
@@ -230,7 +232,9 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
data2.m_ViewOrigins.push_back(window3);
data2.m_ViewOrigins.push_back(window4);
- std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateSplitter(data2, info2);
+ std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateWorkload(armnn::LayerType::Splitter,
+ data2,
+ info2);
outputHandle3->Allocate();
outputHandle4->Allocate();
@@ -305,7 +309,9 @@ LayerTestResult<T, 3> CopyViaSplitterTestImpl(
data.m_ViewOrigins.push_back(window);
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Splitter,
+ data,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
index 5250c3af73..252adb93ac 100644
--- a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
@@ -65,7 +65,9 @@ LayerTestResult<T, outputDimLength> StackTestHelper(
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
outputHandle->Allocate();
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateStack(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Stack,
+ descriptor,
+ info);
workload->Execute();
diff --git a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
index b4818bb12b..865b74c610 100644
--- a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
@@ -54,7 +54,9 @@ LayerTestResult<T, OutDim> StridedSliceTestImpl(
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateStridedSlice(descriptor, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::StridedSlice,
+ descriptor,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp
index 3c4946e44a..61e76ed1ab 100644
--- a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp
@@ -13,7 +13,7 @@ std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::SubtractionQueueDescript
const armnn::WorkloadInfo& info,
const armnn::SubtractionQueueDescriptor& descriptor)
{
- return workloadFactory.CreateSubtraction(descriptor, info);
+ return workloadFactory.CreateWorkload(armnn::LayerType::Subtraction, descriptor, info);
}
LayerTestResult<uint8_t, 4> SubtractionUint8Test(
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
index 95687e3c25..41e57de734 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
@@ -96,7 +96,7 @@ void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
AddOutputToWorkload(queueDescriptor, workloadInfo, output.first, outputHandle.get());
std::unique_ptr<armnn::IWorkload> workload =
- workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
+ workloadFactory.CreateWorkload(armnn::LayerType::TransposeConvolution2d, queueDescriptor, workloadInfo);
inputHandle->Allocate();
outputHandle->Allocate();
@@ -658,7 +658,9 @@ LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
- std::unique_ptr<IWorkload> workload = workloadFactory.CreateTransposeConvolution2d(queueDescriptor, workloadInfo);
+ std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::TransposeConvolution2d,
+ queueDescriptor,
+ workloadInfo);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
index 82bd487d3c..66652f28da 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
@@ -39,7 +39,9 @@ LayerTestResult<T, 4> SimpleTransposeTestImpl(
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateTranspose(data, info);
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Transpose,
+ data,
+ info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp
index e6b50f614a..66a26cc41d 100644
--- a/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp
@@ -169,7 +169,8 @@ LayerTestResult<T, 3> UnidirectionalSequenceLstmLayerFloat32TestImpl(
data.m_Parameters.m_ProjectionEnabled = false;
data.m_Parameters.m_TimeMajor = false;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
@@ -344,7 +345,8 @@ UnidirectionalSequenceLstmLayerFloat32TimeMajorTestImpl(
data.m_Parameters.m_ProjectionEnabled = false;
data.m_Parameters.m_TimeMajor = true;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
@@ -624,7 +626,8 @@ LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithP
data.m_Parameters.m_ClippingThresCell = 10.0f;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
@@ -848,7 +851,8 @@ LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithP
data.m_Parameters.m_TimeMajor = false;
data.m_Parameters.m_ClippingThresCell = 10.0f;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
@@ -1008,7 +1012,8 @@ LayerTestResult<float, 3> UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjec
data.m_Parameters.m_ProjectionEnabled = false;
data.m_Parameters.m_TimeMajor = false;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
@@ -1147,7 +1152,8 @@ LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerInt8Test(
data.m_Parameters.m_ProjectionEnabled = false;
data.m_Parameters.m_TimeMajor = false;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
@@ -1286,7 +1292,8 @@ LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerInt8TimeMajorTest(
data.m_Parameters.m_ProjectionEnabled = false;
data.m_Parameters.m_TimeMajor = true;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
@@ -1449,7 +1456,8 @@ LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerInt8NoCifgWithPeepholeW
data.m_Parameters.m_ClippingThresCell = 10.0f;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
@@ -1640,7 +1648,8 @@ LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerInt8NoCifgWithPeepholeW
data.m_Parameters.m_TimeMajor = false;
data.m_Parameters.m_ClippingThresCell = 10.0f;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
@@ -1776,7 +1785,8 @@ LayerTestResult<float, 3> UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoPr
data.m_Parameters.m_ProjectionEnabled = false;
data.m_Parameters.m_TimeMajor = false;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateUnidirectionalSequenceLstm(data, info);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
inputHandle->Allocate();
outputStateInHandle->Allocate();
cellStateInHandle->Allocate();
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 4bdb84a5a5..0632787db0 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -244,6 +244,451 @@ std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorH
PolymorphicDowncast<IClTensorHandle*>(&parent), shape, coords);
}
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateWorkload(LayerType type,
+ const QueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ switch(type)
+ {
+ case LayerType::Activation :
+ {
+ auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClActivationWorkload>(*activationQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Addition :
+ {
+ auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClAdditionWorkload>(*additionQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::ArgMinMax :
+ {
+ auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
+ return std::make_unique<ClArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::BatchNormalization :
+ {
+ auto batchNormalizationQueueDescriptor
+ = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClBatchNormalizationFloatWorkload, NullWorkload>
+ (*batchNormalizationQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::BatchToSpaceNd :
+ {
+ auto batchToSpaceNdQueueDescriptor
+ = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Cast :
+ {
+ auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClCastWorkload>(*castQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::ChannelShuffle :
+ {
+ auto channelShuffleQueueDescriptor
+ = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Comparison :
+ {
+ auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClComparisonWorkload>(*comparisonQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Concat :
+ {
+ auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClConcatWorkload>(*concatQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Constant :
+ {
+ auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClConstantWorkload>(*constantQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::ConvertFp16ToFp32 :
+ {
+ auto convertFp16ToFp32QueueDescriptor
+ = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor,
+ info,
+ m_CLCompileContext);
+ }
+ case LayerType::ConvertFp32ToFp16 :
+ {
+ auto convertFp32ToFp16QueueDescriptor
+ = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor,
+ info,
+ m_CLCompileContext);
+ }
+ case LayerType::Convolution2d :
+ {
+ auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
+
+ bool isFastMathEnabled = false;
+ if (m_ModelContextPtr)
+ {
+ if (m_ModelContextPtr.get() != nullptr)
+ {
+ auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
+ if (modelOptions)
+ {
+ isFastMathEnabled = modelOptions->IsFastMathEnabled();
+ }
+ }
+ }
+ return MakeWorkload<ClConvolution2dWorkload>(*convolution2dQueueDescriptor,
+ info,
+ m_MemoryManager->GetIntraLayerManager(),
+ m_CLCompileContext,
+ isFastMathEnabled);
+ }
+ case LayerType::Convolution3d :
+ {
+ auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
+
+ bool isFastMathEnabled = false;
+ if (m_ModelContextPtr)
+ {
+ if (m_ModelContextPtr.get() != nullptr)
+ {
+ auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
+ if (modelOptions)
+ {
+ isFastMathEnabled = modelOptions->IsFastMathEnabled();
+ }
+ }
+ }
+ return MakeWorkload<ClConvolution3dWorkload>(*convolution3dQueueDescriptor,
+ info,
+ m_MemoryManager->GetIntraLayerManager(),
+ m_CLCompileContext,
+ isFastMathEnabled);
+ }
+ case LayerType::Debug :
+ {
+ auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
+ return MakeWorkload<NullWorkload, NullWorkload>(*debugQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::DepthToSpace :
+ {
+ auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::DepthwiseConvolution2d :
+ {
+ auto depthwiseConvolution2dQueueDescriptor
+ = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor,
+ info,
+ m_CLCompileContext);
+ }
+ case LayerType::Dequantize :
+ {
+ auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClDequantizeWorkload>(*dequantizeQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::DetectionPostProcess :
+ {
+ auto detectionPostProcessQueueDescriptor
+ = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
+ return MakeWorkload<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor,
+ info,
+ m_CLCompileContext);
+ }
+ case LayerType::Division :
+ {
+ auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
+ return std::make_unique<ClDivisionWorkload>(*divisionQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::ElementwiseUnary :
+ {
+ auto elementwiseUnaryQueueDescriptor
+ = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
+
+ switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
+ {
+ case UnaryOperation::Abs:
+ {
+ AbsQueueDescriptor absQueueDescriptor;
+ absQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
+ absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
+
+ return std::make_unique<ClAbsWorkload>(absQueueDescriptor, info, m_CLCompileContext);
+ }
+ case UnaryOperation::Exp:
+ return std::make_unique<ClExpWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
+ case UnaryOperation::Log:
+ return std::make_unique<ClLogWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
+ case UnaryOperation::LogicalNot:
+ return std::make_unique<ClLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor,
+ info,
+ m_CLCompileContext);
+ case UnaryOperation::Neg:
+ return std::make_unique<ClNegWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
+ case UnaryOperation::Rsqrt:
+ {
+ RsqrtQueueDescriptor rsqrtQueueDescriptor;
+ rsqrtQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
+ rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
+
+ return std::make_unique<ClRsqrtWorkload>(rsqrtQueueDescriptor, info, m_CLCompileContext);
+ }
+ case UnaryOperation::Sin:
+ return std::make_unique<ClSinWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
+ default:
+ return nullptr;
+ }
+ }
+ case LayerType::Fill :
+ {
+ auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
+ return std::make_unique<ClFillWorkload>(*fillQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Floor :
+ {
+ auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::FullyConnected :
+ {
+ auto fullyConnectedQueueDescriptor
+ = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
+ info,
+ m_MemoryManager->GetIntraLayerManager(),
+ m_CLCompileContext);
+ }
+ case LayerType::Gather :
+ {
+ auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClGatherWorkload>(*gatherQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Input :
+ {
+ auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
+ return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
+ }
+ case LayerType::InstanceNormalization :
+ {
+ auto instanceNormalizationQueueDescriptor
+ = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor,
+ info,
+ m_CLCompileContext);
+ }
+ case LayerType::L2Normalization :
+ {
+ auto l2NormalizationQueueDescriptor
+ = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClL2NormalizationFloatWorkload, NullWorkload>(*l2NormalizationQueueDescriptor,
+ info,
+ m_CLCompileContext);
+ }
+ case LayerType::LogicalBinary :
+ {
+ auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
+
+ switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
+ {
+ case LogicalBinaryOperation::LogicalAnd:
+ return std::make_unique<ClLogicalAndWorkload>(*logicalBinaryQueueDescriptor,
+ info,
+ m_CLCompileContext);
+ case LogicalBinaryOperation::LogicalOr:
+ return std::make_unique<ClLogicalOrWorkload>(*logicalBinaryQueueDescriptor,
+ info,
+ m_CLCompileContext);
+ default:
+ return nullptr;
+ }
+ }
+ case LayerType::LogSoftmax :
+ {
+ auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
+
+ return MakeWorkload<ClLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
+ info,
+ m_MemoryManager->GetIntraLayerManager(),
+ m_CLCompileContext);
+ }
+ case LayerType::Lstm :
+ {
+ auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Maximum :
+ {
+ auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClMaximumWorkload>(*maximumQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Mean :
+ {
+ auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClMeanWorkload>(*meanQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::MemCopy :
+ {
+ auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
+ if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
+ {
+ throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemCopy workload");
+ }
+ return MakeWorkload<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
+ }
+ case LayerType::MemImport :
+ {
+ auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
+ if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
+ {
+ throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemImport workload");
+ }
+ return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
+ }
+ case LayerType::Minimum :
+ {
+ auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClMinimumWorkload>(*minimumQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Multiplication :
+ {
+ auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClMultiplicationWorkload>(*multiplicationQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Normalization :
+ {
+ auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClNormalizationFloatWorkload, NullWorkload>(*normalizationQueueDescriptor,
+ info,
+ m_CLCompileContext);
+ }
+ case LayerType::Output :
+ {
+ auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
+ return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
+ }
+ case LayerType::Pad :
+ {
+ auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClPadWorkload>(*padQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Permute :
+ {
+ auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClPermuteWorkload>(*permuteQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Pooling2d :
+ {
+ auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClPooling2dWorkload>(*pooling2dQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::PreCompiled :
+ {
+ auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
+ return MakeWorkload<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Prelu :
+ {
+ auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClPreluWorkload>(*preluQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::QLstm :
+ {
+ auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
+ return std::make_unique<ClQLstmWorkload>(*qLstmQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Quantize :
+ {
+ auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClQuantizeWorkload>(*quantizeQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::QuantizedLstm :
+ {
+ auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Rank :
+ {
+ auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
+ return std::make_unique<ClRankWorkload>(*rankQueueDescriptor, info);
+ }
+ case LayerType::Reduce :
+ {
+ auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
+ return std::make_unique<ClReduceWorkload>(*reduceQueueDescriptor, info);
+ }
+ case LayerType::Reshape :
+ {
+ auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClReshapeWorkload>(*reshapeQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Resize :
+ {
+ auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClResizeWorkload>(*resizeQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Slice :
+ {
+ auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClSliceWorkload>(*sliceQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Softmax :
+ {
+ auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
+ return std::make_unique<ClSoftmaxWorkload>(*softmaxQueueDescriptor,
+ info,
+ m_MemoryManager->GetIntraLayerManager(),
+ m_CLCompileContext);
+ }
+ case LayerType::SpaceToBatchNd :
+ {
+ auto spaceToBatchNdQueueDescriptor
+ = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::SpaceToDepth :
+ {
+ auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Splitter :
+ {
+ auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClSplitterWorkload>(*splitterQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Stack :
+ {
+ auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClStackWorkload>(*stackQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::StridedSlice :
+ {
+ auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClStridedSliceWorkload>(*stridedSliceQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Subtraction :
+ {
+ auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClSubtractionWorkload>(*subtractionQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::Transpose :
+ {
+ auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClTransposeWorkload>(*transposeQueueDescriptor, info, m_CLCompileContext);
+ }
+ case LayerType::TransposeConvolution2d :
+ {
+ auto transposeConvolution2dQueueDescriptor
+ = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
+ return MakeWorkload<ClTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
+ info,
+ m_MemoryManager->GetIntraLayerManager(),
+ m_CLCompileContext);
+ }
+ default:
+ return nullptr;
+ }
+}
+
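A minimal caller-side sketch (not part of this change set; the variable names and the include path shown are assumptions) of how code that previously used the per-layer factory methods migrates to the unified entry point added above:

    // #include <armnn/backends/WorkloadFactory.hpp>   // assumed public header for IWorkloadFactory
    //
    // Before (deprecated, scheduled for removal in 22.08):
    //   auto workload = factory.CreateActivation(activationDescriptor, info);
    //
    // After (single ABI-stable virtual function, dispatched on LayerType):
    std::unique_ptr<armnn::IWorkload> workload =
        factory.CreateWorkload(armnn::LayerType::Activation, activationDescriptor, info);

The same pattern applies to every layer type handled by the switch above; the descriptor passed in must still be the concrete QueueDescriptor subclass for that layer, because the factory downcasts it internally with PolymorphicDowncast.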
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 047f38582d..91ce711db0 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -55,186 +55,312 @@ public:
DataLayout dataLayout,
const bool IsMemoryManaged = true) const override;
+ std::unique_ptr<IWorkload> CreateWorkload(LayerType type,
+ const QueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateCast(const CastQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateFill(const FillQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
const WorkloadInfo& Info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateQLstm(const QLstmQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateRank(const RankQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateReduce(const ReduceQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateTranspose(const TransposeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/cl/test/OpenClTimerTest.cpp b/src/backends/cl/test/OpenClTimerTest.cpp
index 43387d8e4e..3d273903bd 100644
--- a/src/backends/cl/test/OpenClTimerTest.cpp
+++ b/src/backends/cl/test/OpenClTimerTest.cpp
@@ -99,7 +99,7 @@ TEST_CASE_FIXTURE(OpenClFixture, "OpenClTimerBatchNorm")
// for each channel:
    // subtract mean, divide by standard deviation (with an epsilon to avoid div by 0)
// multiply by gamma and add beta
- std::unique_ptr<IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
+ std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(LayerType::BatchNormalization, data, info);
inputHandle->Allocate();
outputHandle->Allocate();
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 2d0b8907f7..19d322b75d 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -131,6 +131,435 @@ std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const Ten
return tensorHandle;
}
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateWorkload(LayerType type,
+ const QueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ switch(type)
+ {
+ case LayerType::Activation :
+ {
+ auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonActivationWorkload>(*activationQueueDescriptor, info);
+ }
+ case LayerType::Addition :
+ {
+ auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonAdditionWorkload>(*additionQueueDescriptor, info);
+ }
+ case LayerType::ArgMinMax :
+ {
+ auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
+ }
+ case LayerType::BatchNormalization :
+ {
+ auto batchNormalizationQueueDescriptor
+ = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonBatchNormalizationWorkload>(*batchNormalizationQueueDescriptor, info);
+ }
+ case LayerType::BatchToSpaceNd :
+ {
+ auto batchToSpaceNdQueueDescriptor
+ = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
+ }
+ case LayerType::Cast :
+ {
+ auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonCastWorkload>(*castQueueDescriptor, info);
+ }
+ case LayerType::ChannelShuffle :
+ {
+ auto channelShuffleQueueDescriptor = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
+ }
+ case LayerType::Comparison :
+ {
+ auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonComparisonWorkload>(*comparisonQueueDescriptor, info);
+ }
+ case LayerType::Concat :
+ {
+ auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonConcatWorkload>(*concatQueueDescriptor, info);
+ }
+ case LayerType::Constant :
+ {
+ auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonConstantWorkload>(*constantQueueDescriptor, info);
+ }
+ case LayerType::ConvertBf16ToFp32 :
+ {
+ auto convertBf16ToFp32QueueDescriptor
+ = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
+ }
+ case LayerType::ConvertFp16ToFp32 :
+ {
+ auto convertFp16ToFp32QueueDescriptor
+ = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
+ }
+ case LayerType::ConvertFp32ToBf16 :
+ {
+ auto convertFp32ToBf16QueueDescriptor
+ = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
+ }
+ case LayerType::ConvertFp32ToFp16 :
+ {
+ auto convertFp32ToFp16QueueDescriptor
+ = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
+ }
+ case LayerType::Convolution2d :
+ {
+ auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
+
+ bool isFastMathEnabled = false;
+ if (m_ModelContextPtr)
+ {
+ if (m_ModelContextPtr.get() != nullptr)
+ {
+ auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
+ if (modelOptions)
+ {
+ isFastMathEnabled = modelOptions->IsFastMathEnabled();
+ }
+ }
+ }
+ return std::make_unique<NeonConvolution2dWorkload>(*convolution2dQueueDescriptor,
+ info,
+ m_MemoryManager->GetIntraLayerManager(),
+ isFastMathEnabled);
+ }
+ case LayerType::Convolution3d :
+ {
+ auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
+
+ bool isFastMathEnabled = false;
+ if (m_ModelContextPtr)
+ {
+ if (m_ModelContextPtr.get() != nullptr)
+ {
+ auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
+ if (modelOptions)
+ {
+ isFastMathEnabled = modelOptions->IsFastMathEnabled();
+ }
+ }
+ }
+ return std::make_unique<NeonConvolution3dWorkload>(*convolution3dQueueDescriptor,
+ info,
+ m_MemoryManager->GetIntraLayerManager(),
+ isFastMathEnabled);
+ }
+ case LayerType::Debug :
+ {
+ auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
+ return MakeWorkloadHelper<NullWorkload, NullWorkload>(*debugQueueDescriptor, info);
+ }
+ case LayerType::DepthToSpace :
+ {
+ auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
+ }
+ case LayerType::DepthwiseConvolution2d :
+ {
+ auto depthwiseConvolution2dQueueDescriptor
+ = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor, info);
+ }
+ case LayerType::Dequantize :
+ {
+ auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonDequantizeWorkload>(*dequantizeQueueDescriptor, info);
+ }
+ case LayerType::DetectionPostProcess :
+ {
+ auto detectionPostProcessQueueDescriptor
+ = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
+ return MakeWorkloadHelper<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor, info);
+ }
+ case LayerType::Division :
+ {
+ auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonDivisionWorkload>(*divisionQueueDescriptor, info);
+ }
+ case LayerType::ElementwiseUnary :
+ {
+ auto elementwiseUnaryQueueDescriptor
+ = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
+
+ switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
+ {
+ case UnaryOperation::Abs:
+ {
+ AbsQueueDescriptor absQueueDescriptor;
+ absQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
+ absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
+
+ return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
+ }
+ case UnaryOperation::Exp:
+ return std::make_unique<NeonExpWorkload>(*elementwiseUnaryQueueDescriptor, info);
+ case UnaryOperation::LogicalNot:
+ return std::make_unique<NeonLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor, info);
+ case UnaryOperation::Log:
+ return std::make_unique<NeonLogWorkload>(*elementwiseUnaryQueueDescriptor, info);
+ case UnaryOperation::Neg:
+ return std::make_unique<NeonNegWorkload>(*elementwiseUnaryQueueDescriptor, info);
+ case UnaryOperation::Rsqrt:
+ {
+ RsqrtQueueDescriptor rsqrtQueueDescriptor;
+ rsqrtQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
+ rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
+
+ return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
+ }
+ case UnaryOperation::Sin:
+ return std::make_unique<NeonSinWorkload>(*elementwiseUnaryQueueDescriptor, info);
+ default:
+ return nullptr;
+ }
+ }
+ case LayerType::Fill :
+ {
+ auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonFillWorkload>(*fillQueueDescriptor, info);
+ }
+ case LayerType::Floor :
+ {
+ auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
+ return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info);
+ }
+ case LayerType::FullyConnected :
+ {
+ auto fullyConnectedQueueDescriptor = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
+ info,
+ m_MemoryManager->GetIntraLayerManager());
+ }
+ case LayerType::Gather :
+ {
+ auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonGatherWorkload>(*gatherQueueDescriptor, info);
+ }
+ case LayerType::Input :
+ {
+ auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
+ return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
+ }
+ case LayerType::InstanceNormalization :
+ {
+ auto instanceNormalizationQueueDescriptor
+ = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
+ }
+ case LayerType::L2Normalization :
+ {
+ auto l2NormalizationQueueDescriptor
+ = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
+ return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>
+ (*l2NormalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
+ }
+ case LayerType::LogSoftmax :
+ {
+ auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
+ info,
+ m_MemoryManager->GetIntraLayerManager());
+ }
+ case LayerType::LogicalBinary :
+ {
+ auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
+
+ switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
+ {
+ case LogicalBinaryOperation::LogicalAnd:
+ return std::make_unique<NeonLogicalAndWorkload>(*logicalBinaryQueueDescriptor, info);
+ case LogicalBinaryOperation::LogicalOr:
+ return std::make_unique<NeonLogicalOrWorkload>(*logicalBinaryQueueDescriptor, info);
+ default:
+ return nullptr;
+ }
+ }
+ case LayerType::Lstm :
+ {
+ auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
+ return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info);
+ }
+ case LayerType::Maximum :
+ {
+ auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonMaximumWorkload>(*maximumQueueDescriptor, info);
+ }
+ case LayerType::Mean :
+ {
+ auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonMeanWorkload>(*meanQueueDescriptor, info);
+ }
+ case LayerType::MemCopy :
+ {
+ auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
+ if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
+ {
+ throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
+ }
+ return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
+ }
+ case LayerType::MemImport :
+ {
+ auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
+ if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
+ {
+ throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemImport workload");
+ }
+ return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
+ }
+ case LayerType::Minimum :
+ {
+ auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonMinimumWorkload>(*minimumQueueDescriptor, info);
+ }
+ case LayerType::Multiplication :
+ {
+ auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonMultiplicationWorkload>(*multiplicationQueueDescriptor, info);
+ }
+ case LayerType::Normalization :
+ {
+ auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
+ return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>
+ (*normalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
+ }
+ case LayerType::Output :
+ {
+ auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
+ return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
+ }
+ case LayerType::Pad :
+ {
+ auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonPadWorkload>(*padQueueDescriptor, info);
+ }
+ case LayerType::Permute :
+ {
+ auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonPermuteWorkload>(*permuteQueueDescriptor, info);
+ }
+ case LayerType::Pooling2d :
+ {
+ auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonPooling2dWorkload>(*pooling2dQueueDescriptor, info);
+ }
+ case LayerType::PreCompiled :
+ {
+ auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
+ return MakeWorkloadHelper<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info);
+ }
+ case LayerType::Prelu :
+ {
+ auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonPreluWorkload>(*preluQueueDescriptor, info);
+ }
+ case LayerType::QLstm :
+ {
+ auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonQLstmWorkload>(*qLstmQueueDescriptor, info);
+ }
+ case LayerType::Quantize :
+ {
+ auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonQuantizeWorkload>(*quantizeQueueDescriptor, info);
+ }
+ case LayerType::QuantizedLstm :
+ {
+ auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info);
+ }
+ case LayerType::Rank :
+ {
+ auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonRankWorkload>(*rankQueueDescriptor, info);
+ }
+ case LayerType::Reduce :
+ {
+ auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonReduceWorkload>(*reduceQueueDescriptor, info);
+ }
+ case LayerType::Reshape :
+ {
+ auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonReshapeWorkload>(*reshapeQueueDescriptor, info);
+ }
+ case LayerType::Resize :
+ {
+ auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonResizeWorkload>(*resizeQueueDescriptor, info);
+ }
+ case LayerType::Slice :
+ {
+ auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonSliceWorkload>(*sliceQueueDescriptor, info);
+ }
+ case LayerType::Softmax :
+ {
+ auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonSoftmaxWorkload>(*softmaxQueueDescriptor,
+ info,
+ m_MemoryManager->GetIntraLayerManager());
+ }
+ case LayerType::SpaceToBatchNd :
+ {
+ auto spaceToBatchNdQueueDescriptor
+ = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
+ }
+ case LayerType::SpaceToDepth :
+ {
+ auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
+ }
+ case LayerType::Splitter :
+ {
+ auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonSplitterWorkload>(*splitterQueueDescriptor, info);
+ }
+ case LayerType::Stack :
+ {
+ auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonStackWorkload>(*stackQueueDescriptor, info);
+ }
+ case LayerType::StridedSlice :
+ {
+ auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
+ }
+ case LayerType::Subtraction :
+ {
+ auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonSubtractionWorkload>(*subtractionQueueDescriptor, info);
+ }
+ case LayerType::Transpose :
+ {
+ auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonTransposeWorkload>(*transposeQueueDescriptor, info);
+ }
+ case LayerType::TransposeConvolution2d :
+ {
+ auto transposeConvolution2dQueueDescriptor
+ = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
+ return std::make_unique<NeonTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
+ info,
+ m_MemoryManager->GetIntraLayerManager());
+ }
+ default:
+ return nullptr;
+ }
+}
+
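Both backend implementations above return nullptr from their default cases for any LayerType they do not handle (and likewise for unsupported UnaryOperation or LogicalBinaryOperation values). A short defensive sketch for callers of the unified interface (illustrative only; the exception type used here is an assumption, not mandated by this change):

    std::unique_ptr<armnn::IWorkload> workload = factory.CreateWorkload(type, descriptor, info);
    if (workload == nullptr)
    {
        // The factory does not support this layer type: fall back to another
        // backend or surface the failure to the caller.
        throw armnn::UnimplementedException("Layer type not supported by this workload factory");
    }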
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index f44681a5a8..802b9e1ac9 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -52,192 +52,322 @@ public:
DataLayout dataLayout,
const bool IsMemoryManaged = true) const override;
+ std::unique_ptr<IWorkload> CreateWorkload(LayerType type,
+ const QueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
const WorkloadInfo& Info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateCast(const CastQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
const WorkloadInfo& Info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
-
+
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
const WorkloadInfo& Info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateFill(const FillQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
const WorkloadInfo& Info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateQLstm(const QLstmQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateRank(const RankQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateReduce(const ReduceQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateTranspose(const TransposeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/neon/test/NeonTimerTest.cpp b/src/backends/neon/test/NeonTimerTest.cpp
index 6f96df54cd..7d73df83bd 100644
--- a/src/backends/neon/test/NeonTimerTest.cpp
+++ b/src/backends/neon/test/NeonTimerTest.cpp
@@ -78,7 +78,8 @@ TEST_CASE("NeonTimerMeasure")
descriptor.m_Parameters.m_A = upperBound;
descriptor.m_Parameters.m_B = lowerBound;
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
+ std::unique_ptr<armnn::IWorkload> workload
+ = workloadFactory.CreateWorkload(LayerType::Activation, descriptor, workloadInfo);
inputHandle->Allocate();
outputHandle->Allocate();
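The NeonTimerTest change above is the call-site migration in miniature: the deprecated per-layer creator is replaced by the unified, ABI-stable entry point. A minimal sketch of the same migration, assuming descriptor, workloadInfo and workloadFactory are set up as in the test:

    // Deprecated from 22.08 onwards:
    //   std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    // Preferred, ABI-stable form:
    std::unique_ptr<armnn::IWorkload> workload
        = workloadFactory.CreateWorkload(LayerType::Activation, descriptor, workloadInfo);
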
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 01e7a3efb1..9db81fc9cb 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <Layer.hpp>
@@ -141,6 +141,511 @@ std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const Tens
}
}
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
+ const QueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ switch(type)
+ {
+ case LayerType::Activation :
+ {
+ auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefActivationWorkload>(*activationQueueDescriptor, info);
+ }
+ case LayerType::Addition :
+ {
+ auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
+
+ if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+ {
+ return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
+ }
+ else
+ {
+ return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
+ }
+ }
+ case LayerType::ArgMinMax :
+ {
+ auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
+ }
+ case LayerType::BatchNormalization :
+ {
+ auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefBatchNormalizationWorkload>(*batchNormQueueDescriptor, info);
+ }
+ case LayerType::BatchToSpaceNd :
+ {
+ auto batchToSpaceNdQueueDescriptor
+ = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
+ }
+ case LayerType::Cast :
+ {
+ auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefCastWorkload>(*castQueueDescriptor, info);
+ }
+ case LayerType::ChannelShuffle :
+ {
+ auto channelShuffleQueueDescriptor
+ = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
+ }
+ case LayerType::Comparison :
+ {
+ auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefComparisonWorkload>(*comparisonQueueDescriptor, info);
+ }
+ case LayerType::Concat :
+ {
+ auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefConcatWorkload>(*concatQueueDescriptor, info);
+ }
+ case LayerType::Constant :
+ {
+ auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
+ }
+ case LayerType::ConvertBf16ToFp32 :
+ {
+ auto convertBf16ToFp32QueueDescriptor
+ = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
+ return std::make_unique<RefConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
+ }
+ case LayerType::ConvertFp16ToFp32:
+ {
+ auto convertFp16ToFp32QueueDescriptor
+ = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
+ return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
+ }
+ case LayerType::ConvertFp32ToBf16:
+ {
+ auto convertFp32ToBf16QueueDescriptor
+ = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
+ return std::make_unique<RefConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
+ }
+ case LayerType::ConvertFp32ToFp16:
+ {
+ auto convertFp32ToFp16QueueDescriptor
+ = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
+ return std::make_unique<RefConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
+ }
+ case LayerType::Convolution2d:
+ {
+ auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefConvolution2dWorkload>(*convolution2dQueueDescriptor, info);
+ }
+ case LayerType::Convolution3d:
+ {
+ auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefConvolution3dWorkload>(*convolution3dQueueDescriptor, info);
+ }
+ case LayerType::Debug:
+ {
+ auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
+ if (IsBFloat16(info))
+ {
+ return std::make_unique<RefDebugBFloat16Workload>(*debugQueueDescriptor, info);
+ }
+ if (IsFloat16(info))
+ {
+ return std::make_unique<RefDebugFloat16Workload>(*debugQueueDescriptor, info);
+ }
+ if (IsQSymmS16(info))
+ {
+ return std::make_unique<RefDebugQSymmS16Workload>(*debugQueueDescriptor, info);
+ }
+ if (IsQSymmS8(info))
+ {
+ return std::make_unique<RefDebugQSymmS8Workload>(*debugQueueDescriptor, info);
+ }
+ if (IsQAsymmU8(info))
+ {
+ return std::make_unique<RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
+ }
+ if (IsQAsymmS8(info))
+ {
+ return std::make_unique<RefDebugQAsymmS8Workload>(*debugQueueDescriptor, info);
+ }
+ if (IsSigned32(info))
+ {
+ return std::make_unique<RefDebugSigned32Workload>(*debugQueueDescriptor, info);
+ }
+
+ return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
+ }
+ case LayerType::DepthToSpace:
+ {
+ auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
+ }
+ case LayerType::DepthwiseConvolution2d:
+ {
+ auto depthwiseConvolution2DQueueDescriptor
+ = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefDepthwiseConvolution2dWorkload>(*depthwiseConvolution2DQueueDescriptor, info);
+ }
+ case LayerType::Dequantize:
+ {
+ auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefDequantizeWorkload>(*dequantizeQueueDescriptor, info);
+ }
+ case LayerType::DetectionPostProcess:
+ {
+ auto detectionPostProcessQueueDescriptor
+ = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefDetectionPostProcessWorkload>(*detectionPostProcessQueueDescriptor, info);
+ }
+ case LayerType::Division:
+ {
+ auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
+ if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+ {
+ return std::make_unique<RefDivisionWorkload<int32_t>>(*divisionQueueDescriptor, info);
+ }
+ else
+ {
+ return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
+ }
+ }
+ case LayerType::ElementwiseUnary:
+ {
+ auto elementwiseUnaryQueueDescriptor
+ = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
+ if ((*elementwiseUnaryQueueDescriptor).m_Parameters.m_Operation == UnaryOperation::LogicalNot)
+ {
+ return std::make_unique<RefLogicalUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
+ }
+ return std::make_unique<RefElementwiseUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
+ }
+ case LayerType::FakeQuantization:
+ {
+ auto fakeQuantizationQueueDescriptor
+ = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefFakeQuantizationFloat32Workload>(*fakeQuantizationQueueDescriptor, info);
+ }
+ case LayerType::Fill:
+ {
+ auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefFillWorkload>(*fillQueueDescriptor, info);
+ }
+ case LayerType::Floor:
+ {
+ auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
+ if(IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
+ {
+ return nullptr;
+ }
+ else
+ {
+ return std::make_unique<RefFloorWorkload>(*floorQueueDescriptor, info);
+ }
+ }
+ case LayerType::FullyConnected:
+ {
+ auto fullyConnectedQueueDescriptor
+ = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefFullyConnectedWorkload>(*fullyConnectedQueueDescriptor, info);
+ }
+ case LayerType::Gather:
+ {
+ auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefGatherWorkload>(*gatherQueueDescriptor, info);
+ }
+ case LayerType::Input:
+ {
+ auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
+ if (info.m_InputTensorInfos.empty() )
+ {
+ throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
+ }
+ if (info.m_OutputTensorInfos.empty())
+ {
+ throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
+ }
+
+ if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
+ {
+ throw InvalidArgumentException("RefWorkloadFactory::CreateInput: "
+ "data input and output differ in byte count.");
+ }
+
+ return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
+ }
+ case LayerType::InstanceNormalization:
+ {
+ auto instanceNormalizationQueueDescriptor
+ = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
+ }
+ case LayerType::L2Normalization:
+ {
+ auto l2NormalizationQueueDescriptor
+ = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefL2NormalizationWorkload>(*l2NormalizationQueueDescriptor, info);
+ }
+ case LayerType::LogicalBinary:
+ {
+ auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefLogicalBinaryWorkload>(*logicalBinaryQueueDescriptor, info);
+ }
+ case LayerType::LogSoftmax:
+ {
+ auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor, info);
+ }
+ case LayerType::Lstm:
+ {
+ auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefLstmWorkload>(*lstmQueueDescriptor, info);
+ }
+ case LayerType::Maximum:
+ {
+ auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
+ if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+ {
+ return std::make_unique<RefMaximumWorkload<int32_t>>(*maximumQueueDescriptor, info);
+ }
+ else
+ {
+ return std::make_unique<RefMaximumWorkload<float>>(*maximumQueueDescriptor, info);
+ }
+ }
+ case LayerType::Mean:
+ {
+ auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefMeanWorkload>(*meanQueueDescriptor, info);
+ }
+ case LayerType::MemCopy:
+ {
+ auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
+ if (descriptor.m_Inputs.empty())
+ {
+ throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
+ }
+ return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
+ }
+ case LayerType::MemImport:
+ {
+ auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
+ if (descriptor.m_Inputs.empty())
+ {
+ throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
+ }
+ return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
+ }
+ case LayerType::Minimum:
+ {
+ auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
+ if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+ {
+ return std::make_unique<RefMinimumWorkload<int32_t>>(*minimumQueueDescriptor, info);
+ }
+ else
+ {
+ return std::make_unique<RefMinimumWorkload<float>>(*minimumQueueDescriptor, info);
+ }
+ }
+ case LayerType::Multiplication:
+ {
+ auto multiplicationQueueDescriptor
+ = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
+ if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+ {
+ return std::make_unique<RefMultiplicationWorkload<int32_t>>(*multiplicationQueueDescriptor, info);
+ }
+ else
+ {
+ return std::make_unique<RefMultiplicationWorkload<float>>(*multiplicationQueueDescriptor, info);
+ }
+ }
+ case LayerType::Normalization:
+ {
+ auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefNormalizationWorkload>(*normalizationQueueDescriptor, info);
+ }
+ case LayerType::Output:
+ {
+ auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
+ if (info.m_InputTensorInfos.empty() )
+ {
+ throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
+ }
+ if (info.m_OutputTensorInfos.empty())
+ {
+ throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
+ }
+ if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
+ {
+ throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output "
+ "differ in byte count.");
+ }
+
+ return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
+ }
+ case LayerType::Pad:
+ {
+ auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefPadWorkload>(*padQueueDescriptor, info);
+ }
+ case LayerType::Permute:
+ {
+ auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
+ if (IsQSymmS16(info))
+ {
+ return std::make_unique<RefPermuteQSymm16Workload>(*permuteQueueDescriptor, info);
+ }
+ else if (IsBFloat16(info))
+ {
+ return std::make_unique<RefPermuteBFloat16Workload>(*permuteQueueDescriptor, info);
+ }
+ else if (IsQAsymmS8(info))
+ {
+ return std::make_unique<RefPermuteQAsymmS8Workload>(*permuteQueueDescriptor, info);
+ }
+ return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
+ NullWorkload, NullWorkload, NullWorkload>(*permuteQueueDescriptor, info);
+ }
+ case LayerType::Pooling2d:
+ {
+ auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefPooling2dWorkload>(*pooling2dQueueDescriptor, info);
+ }
+ case LayerType::Pooling3d:
+ {
+ auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefPooling3dWorkload>(*pooling3dQueueDescriptor, info);
+ }
+ case LayerType::PreCompiled:
+ {
+ return nullptr;
+ }
+ case LayerType::Prelu:
+ {
+ auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefPreluWorkload>(*preluQueueDescriptor, info);
+ }
+ case LayerType::QLstm:
+ {
+ auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefQLstmWorkload>(*qlstmQueueDescriptor, info);
+ }
+ case LayerType::Quantize:
+ {
+ auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefQuantizeWorkload>(*quantizeQueueDescriptor, info);
+ }
+ case LayerType::Rank:
+ {
+ auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefRankWorkload>(*rankQueueDescriptor, info);
+ }
+ case LayerType::Reduce:
+ {
+ auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefReduceWorkload>(*reduceQueueDescriptor, info);
+ }
+ case LayerType::Reshape:
+ {
+ auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefReshapeWorkload>(*reshapeQueueDescriptor, info);
+ }
+ case LayerType::Resize:
+ {
+ auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
+ }
+ case LayerType::Shape:
+ {
+ auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefShapeWorkload>(*shapeQueueDescriptor, info);
+ }
+ case LayerType::Slice:
+ {
+ auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefSliceWorkload>(*sliceQueueDescriptor, info);
+ }
+ case LayerType::Softmax:
+ {
+ auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefSoftmaxWorkload>(*softmaxQueueDescriptor, info);
+ }
+ case LayerType::SpaceToBatchNd:
+ {
+ auto spaceToBatchNdQueueDescriptor
+ = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
+ }
+ case LayerType::SpaceToDepth:
+ {
+ auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
+ }
+ case LayerType::Splitter:
+ {
+ auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefSplitterWorkload>(*splitterQueueDescriptor, info);
+ }
+ case LayerType::Stack:
+ {
+ auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefStackWorkload>(*stackQueueDescriptor, info);
+ }
+ case LayerType::StridedSlice:
+ {
+ auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
+ }
+ case LayerType::Subtraction:
+ {
+ auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
+ if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
+ {
+ return std::make_unique<RefSubtractionWorkload<int32_t>>(*subtractionQueueDescriptor, info);
+ }
+ else
+ {
+ return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
+ }
+ }
+ case LayerType::Transpose:
+ {
+ auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
+ if (IsQSymmS16(info))
+ {
+ return std::make_unique<RefTransposeQSymm16Workload>(*transposeQueueDescriptor, info);
+ }
+ else if (IsBFloat16(info))
+ {
+ return std::make_unique<RefTransposeBFloat16Workload>(*transposeQueueDescriptor, info);
+ }
+ else if (IsQAsymmS8(info))
+ {
+ return std::make_unique<RefTransposeQAsymmS8Workload>(*transposeQueueDescriptor, info);
+ }
+ return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload,
+ RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>
+ (*transposeQueueDescriptor, info);
+ }
+ case LayerType::TransposeConvolution2d:
+ {
+ auto transposeConvolution2dQueueDescriptor
+ = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor, info);
+ }
+ case LayerType::UnidirectionalSequenceLstm:
+ {
+ auto unidirectionalSequenceLstmQueueDescriptor
+ = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
+ return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(*unidirectionalSequenceLstmQueueDescriptor,
+ info);
+ }
+ default:
+ return nullptr;
+ }
+}
+
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
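Several of the reference element-wise cases above (Addition, Division, Maximum, Minimum, Multiplication, Subtraction) also pick the workload template parameter from the input data type. A standalone sketch of that selection, reusing the names from the Addition case:

    // Signed 32-bit inputs get the int32_t instantiation; everything else falls back to float.
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
    }
    return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
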
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 3dfd3d8a7a..d9d4371a69 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -66,201 +66,337 @@ public:
DataLayout dataLayout,
const bool IsMemoryManaged = true) const override;
+ std::unique_ptr<IWorkload> CreateWorkload(LayerType type,
+ const QueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateCast(const CastQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateFill(const FillQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
const WorkloadInfo& Info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreatePooling3d(const Pooling3dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateQLstm(const QLstmQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateRank(const RankQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateReduce(const ReduceQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateShape(const ShapeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateTranspose(const TransposeQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable "
+ "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "22.08")
std::unique_ptr<IWorkload> CreateUnidirectionalSequenceLstm(
const UnidirectionalSequenceLstmQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;