//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonBackendId.hpp"
#include "NeonBackendModelContext.hpp"
#include "NeonTensorHandle.hpp"
#include "NeonWorkloadFactory.hpp"

// Note: the header names of the remaining include directives were lost in this copy of the file;
// the list below is inferred from the symbols used in this translation unit and may not match the
// upstream include list exactly.
#include <Layer.hpp>

#include <armnn/Utils.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>

#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
#include <backendsCommon/MemImportWorkload.hpp>
#include <backendsCommon/TensorHandle.hpp>

#include <neon/workloads/NeonWorkloadUtils.hpp>
#include <neon/workloads/NeonWorkloads.hpp>

#include <arm_compute/runtime/Scheduler.h>

namespace armnn
{

namespace
{
static const BackendId s_Id{NeonBackendId()};
}

bool NeonWorkloadFactory::IsLayerSupported(const Layer& layer,
                                           Optional<DataType> dataType,
                                           std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool NeonWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                           Optional<DataType> dataType,
                                           std::string& outReasonIfUnsupported,
                                           const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

const BackendId& NeonWorkloadFactory::GetBackendId() const
{
    return s_Id;
}

void NeonWorkloadFactory::SetNumberOfThreads()
{
    if (m_ModelContextPtr)
    {
        const unsigned int MIN_THREADS = 1;
        const unsigned int MAX_THREADS = 64;

        // Set the number of threads used by the Arm Compute scheduler if the user has set the
        // NumberOfThreads backend option. Only apply it if it is non-zero and within the
        // [MIN_THREADS, MAX_THREADS] limits.
        auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
        auto numberOfThreads = modelOptions->GetNumberOfThreads();

        if (numberOfThreads != 0 && numberOfThreads >= MIN_THREADS && numberOfThreads <= MAX_THREADS)
        {
            arm_compute::Scheduler::get().set_num_threads(numberOfThreads);
        }
    }
}

NeonWorkloadFactory::NeonWorkloadFactory(const std::shared_ptr<NeonMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager)
    , m_ModelContextPtr(IBackendInternal::IBackendSpecificModelContextPtr{})
{
    SetNumberOfThreads();
}

NeonWorkloadFactory::NeonWorkloadFactory(const std::shared_ptr<NeonMemoryManager>& memoryManager,
                                         const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_MemoryManager(memoryManager)
    , m_ModelContextPtr(modelContextPtr)
{
    SetNumberOfThreads();
}
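// The values consumed by SetNumberOfThreads() and by the fast-math checks further down normally
// arrive through the "CpuAcc" backend model options. A minimal, illustrative sketch of how a
// caller might supply them when optimising a network (local variable names are hypothetical;
// BackendOptions/OptimizerOptions are the standard Arm NN types):
//
//     armnn::BackendOptions cpuAccOptions("CpuAcc",
//     {
//         { "FastMathEnabled", true },
//         { "NumberOfThreads", 4u   }
//     });
//     armnn::OptimizerOptions optimizerOptions;
//     optimizerOptions.m_ModelOptions.push_back(cpuAccOptions);
//
// The resulting ModelOptions reach this factory as a NeonBackendModelContext through the second
// constructor above.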
std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
    TensorShape const& subTensorShape, unsigned int const* subTensorOrigin) const
{
    const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);

    arm_compute::Coordinates coords;
    coords.set_num_dimensions(subTensorShape.GetNumDimensions());
    for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
    {
        // Arm compute indexes tensor coords in reverse order, so e.g. a 4D ArmNN origin
        // {n, c, h, w} is written into the ACL Coordinates as (w, h, c, n).
        unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
        coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
    }

    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
    {
        return nullptr;
    }

    return std::make_unique<NeonSubTensorHandle>(
        PolymorphicDowncast<IAclTensorHandle*>(&parent), shape, coords);
}

std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                       const bool IsMemoryManaged) const
{
    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
    if (IsMemoryManaged)
    {
        tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
    }
    return tensorHandle;
}

std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                       DataLayout dataLayout,
                                                                       const bool IsMemoryManaged) const
{
    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
    if (IsMemoryManaged)
    {
        tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
    }
    return tensorHandle;
}
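// CreateWorkload() below is the factory's single dispatch point: it switches on the LayerType,
// downcasts the generic QueueDescriptor to the layer-specific descriptor type and constructs the
// matching Neon workload. Layer types with no Neon implementation fall through to the default
// case and return nullptr.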
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateWorkload(LayerType type,
                                                               const QueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    switch(type)
    {
        case LayerType::Activation :
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonActivationWorkload>(*activationQueueDescriptor, info);
        }
        case LayerType::Addition :
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonAdditionWorkload>(*additionQueueDescriptor, info);
        }
        case LayerType::ArgMinMax :
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
        }
        case LayerType::BatchNormalization :
        {
            auto batchNormalizationQueueDescriptor
                = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonBatchNormalizationWorkload>(*batchNormalizationQueueDescriptor, info);
        }
        case LayerType::BatchToSpaceNd :
        {
            auto batchToSpaceNdQueueDescriptor
                = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
        }
        case LayerType::Cast :
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonCastWorkload>(*castQueueDescriptor, info);
        }
        case LayerType::ChannelShuffle :
        {
            auto channelShuffleQueueDescriptor = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
        }
        case LayerType::Comparison :
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonComparisonWorkload>(*comparisonQueueDescriptor, info);
        }
        case LayerType::Concat :
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConcatWorkload>(*concatQueueDescriptor, info);
        }
        case LayerType::Constant :
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConstantWorkload>(*constantQueueDescriptor, info);
        }
        case LayerType::ConvertBf16ToFp32 :
        {
            auto convertBf16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp16ToFp32 :
        {
            auto convertFp16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToBf16 :
        {
            auto convertFp32ToBf16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToFp16 :
        {
            auto convertFp32ToFp16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return std::make_unique<NeonConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
        }
        case LayerType::Convolution2d :
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);

            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                if (m_ModelContextPtr.get() != nullptr)
                {
                    auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
                    if (modelOptions)
                    {
                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
                    }
                }
            }
            return std::make_unique<NeonConvolution2dWorkload>(*convolution2dQueueDescriptor,
                                                               info,
                                                               m_MemoryManager->GetIntraLayerManager(),
                                                               isFastMathEnabled);
        }
        case LayerType::Convolution3d :
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);

            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                if (m_ModelContextPtr.get() != nullptr)
                {
                    auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
                    if (modelOptions)
                    {
                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
                    }
                }
            }
            return std::make_unique<NeonConvolution3dWorkload>(*convolution3dQueueDescriptor,
                                                               info,
                                                               m_MemoryManager->GetIntraLayerManager(),
                                                               isFastMathEnabled);
        }
        case LayerType::Debug :
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NullWorkload, NullWorkload>(*debugQueueDescriptor, info);
        }
        case LayerType::DepthToSpace :
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
        }
        case LayerType::DepthwiseConvolution2d :
        {
            auto depthwiseConvolution2dQueueDescriptor
                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor, info);
        }
        case LayerType::Dequantize :
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonDequantizeWorkload>(*dequantizeQueueDescriptor, info);
        }
        case LayerType::DetectionPostProcess :
        {
            auto detectionPostProcessQueueDescriptor
                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor, info);
        }
        case LayerType::Division :
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonDivisionWorkload>(*divisionQueueDescriptor, info);
        }
        case LayerType::ElementwiseUnary :
        {
            auto elementwiseUnaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
            switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case UnaryOperation::Abs:
                {
                    AbsQueueDescriptor absQueueDescriptor;
                    absQueueDescriptor.m_Inputs  = elementwiseUnaryQueueDescriptor->m_Inputs;
                    absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
                    return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
                }
                case UnaryOperation::Exp:
                    return std::make_unique<NeonExpWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::LogicalNot:
                    return std::make_unique<NeonLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::Log:
                    return std::make_unique<NeonLogWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::Neg:
                    return std::make_unique<NeonNegWorkload>(*elementwiseUnaryQueueDescriptor, info);
                case UnaryOperation::Rsqrt:
                {
                    RsqrtQueueDescriptor rsqrtQueueDescriptor;
                    rsqrtQueueDescriptor.m_Inputs  = elementwiseUnaryQueueDescriptor->m_Inputs;
                    rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
                    return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
                }
                case UnaryOperation::Sin:
                    return std::make_unique<NeonSinWorkload>(*elementwiseUnaryQueueDescriptor, info);
                default:
                    return nullptr;
            }
        }
        case LayerType::Fill :
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonFillWorkload>(*fillQueueDescriptor, info);
        }
        case LayerType::Floor :
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info);
        }
        case LayerType::FullyConnected :
        {
            auto fullyConnectedQueueDescriptor = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
                                                                info,
                                                                m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::Gather :
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonGatherWorkload>(*gatherQueueDescriptor, info);
        }
        case LayerType::Input :
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization :
        {
            auto instanceNormalizationQueueDescriptor
                = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
        }
        case LayerType::L2Normalization :
        {
            auto l2NormalizationQueueDescriptor
                = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>
                (*l2NormalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::LogSoftmax :
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
                                                            info,
                                                            m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::LogicalBinary :
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
            switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case LogicalBinaryOperation::LogicalAnd:
                    return std::make_unique<NeonLogicalAndWorkload>(*logicalBinaryQueueDescriptor, info);
                case LogicalBinaryOperation::LogicalOr:
                    return std::make_unique<NeonLogicalOrWorkload>(*logicalBinaryQueueDescriptor, info);
                default:
                    return nullptr;
            }
        }
        case LayerType::Lstm :
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info);
        }
        case LayerType::Maximum :
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonMaximumWorkload>(*maximumQueueDescriptor, info);
        }
        case LayerType::Mean :
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonMeanWorkload>(*meanQueueDescriptor, info);
        }
        case LayerType::MemCopy :
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
            }
            return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport :
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemImport workload");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum :
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonMinimumWorkload>(*minimumQueueDescriptor, info);
        }
        case LayerType::Multiplication :
        {
            auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonMultiplicationWorkload>(*multiplicationQueueDescriptor, info);
        }
        case LayerType::Normalization :
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>
                (*normalizationQueueDescriptor, info, m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::Output :
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad :
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPadWorkload>(*padQueueDescriptor, info);
        }
        case LayerType::Permute :
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPermuteWorkload>(*permuteQueueDescriptor, info);
        }
        case LayerType::Pooling2d :
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPooling2dWorkload>(*pooling2dQueueDescriptor, info);
        }
        case LayerType::PreCompiled :
        {
            auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info);
        }
        case LayerType::Prelu :
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonPreluWorkload>(*preluQueueDescriptor, info);
        }
        case LayerType::QLstm :
        {
            auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonQLstmWorkload>(*qLstmQueueDescriptor, info);
        }
        case LayerType::Quantize :
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonQuantizeWorkload>(*quantizeQueueDescriptor, info);
        }
        case LayerType::QuantizedLstm :
        {
            auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info);
        }
        case LayerType::Rank :
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce :
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape :
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonReshapeWorkload>(*reshapeQueueDescriptor, info);
        }
        case LayerType::Resize :
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonResizeWorkload>(*resizeQueueDescriptor, info);
        }
        case LayerType::Slice :
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSliceWorkload>(*sliceQueueDescriptor, info);
        }
        case LayerType::Softmax :
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSoftmaxWorkload>(*softmaxQueueDescriptor,
                                                         info,
                                                         m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::SpaceToBatchNd :
        {
            auto spaceToBatchNdQueueDescriptor = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
        }
        case LayerType::SpaceToDepth :
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
        }
        case LayerType::Splitter :
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSplitterWorkload>(*splitterQueueDescriptor, info);
        }
        case LayerType::Stack :
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonStackWorkload>(*stackQueueDescriptor, info);
        }
        case LayerType::StridedSlice :
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
        }
        case LayerType::Subtraction :
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonSubtractionWorkload>(*subtractionQueueDescriptor, info);
        }
        case LayerType::Transpose :
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonTransposeWorkload>(*transposeQueueDescriptor, info);
        }
        case LayerType::TransposeConvolution2d :
        {
            auto transposeConvolution2dQueueDescriptor
                = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<NeonTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
                                                                        info,
                                                                        m_MemoryManager->GetIntraLayerManager());
        }
        case LayerType::UnidirectionalSequenceLstm :
        {
            auto desc = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<NeonUnidirectionalSequenceLstmFloatWorkload, NullWorkload>(*desc, info);
        }
        default:
            return nullptr;
    }
}
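// The per-layer Create<LayerName>() methods below are the older factory entry points. They are
// retained for backwards compatibility and build the same workload types as the CreateWorkload()
// switch above; newer code paths are expected to go through CreateWorkload().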
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(
    const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonActivationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateAddition(
    const AdditionQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonAdditionWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateArgMinMax(
    const ArgMinMaxQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonArgMinMaxWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateBatchNormalization(
    const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonBatchNormalizationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateBatchToSpaceNd(
    const BatchToSpaceNdQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonBatchToSpaceNdWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateCast(
    const CastQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonCastWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateChannelShuffle(
    const ChannelShuffleQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonChannelShuffleWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateComparison(
    const ComparisonQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonComparisonWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConcat(
    const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonConcatWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConstant(
    const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonConstantWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertBf16ToFp32(
    const ConvertBf16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonConvertBf16ToFp32Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp16ToFp32(
    const ConvertFp16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonConvertFp16ToFp32Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp32ToBf16(
    const ConvertFp32ToBf16QueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonConvertFp32ToBf16Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp32ToFp16(
    const ConvertFp32ToFp16QueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonConvertFp32ToFp16Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvolution2d(
    const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    bool isFastMathEnabled = false;
    if (m_ModelContextPtr)
    {
        if (m_ModelContextPtr.get() != nullptr)
        {
            auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
            if (modelOptions)
            {
                isFastMathEnabled = modelOptions->IsFastMathEnabled();
            }
        }
    }
    return std::make_unique<NeonConvolution2dWorkload>(descriptor,
                                                       info,
                                                       m_MemoryManager->GetIntraLayerManager(),
                                                       isFastMathEnabled);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvolution3d(
    const Convolution3dQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    bool isFastMathEnabled = false;
    if (m_ModelContextPtr)
    {
        if (m_ModelContextPtr.get() != nullptr)
        {
            auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
            if (modelOptions)
            {
                isFastMathEnabled = modelOptions->IsFastMathEnabled();
            }
        }
    }
    return std::make_unique<NeonConvolution3dWorkload>(descriptor,
                                                       info,
                                                       m_MemoryManager->GetIntraLayerManager(),
                                                       isFastMathEnabled);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDebug(
    const DebugQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDepthToSpace(
    const DepthToSpaceQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonDepthToSpaceWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDepthwiseConvolution2d(
    const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonDepthwiseConvolutionWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDequantize(
    const DequantizeQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonDequantizeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDetectionPostProcess(
    const armnn::DetectionPostProcessQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDivision(
    const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonDivisionWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(
    const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    switch(descriptor.m_Parameters.m_Operation)
    {
        case UnaryOperation::Abs:
        {
            AbsQueueDescriptor absQueueDescriptor;
            absQueueDescriptor.m_Inputs  = descriptor.m_Inputs;
            absQueueDescriptor.m_Outputs = descriptor.m_Outputs;
            return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
        }
        case UnaryOperation::Exp:
            return std::make_unique<NeonExpWorkload>(descriptor, info);
        case UnaryOperation::LogicalNot:
            return std::make_unique<NeonLogicalNotWorkload>(descriptor, info);
        case UnaryOperation::Log:
            return std::make_unique<NeonLogWorkload>(descriptor, info);
        case UnaryOperation::Neg:
            return std::make_unique<NeonNegWorkload>(descriptor, info);
        case UnaryOperation::Rsqrt:
        {
            RsqrtQueueDescriptor rsqrtQueueDescriptor;
            rsqrtQueueDescriptor.m_Inputs  = descriptor.m_Inputs;
            rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs;
            return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
        }
        case UnaryOperation::Sin:
            return std::make_unique<NeonSinWorkload>(descriptor, info);
        default:
            return nullptr;
    }
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFill(
    const FillQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonFillWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFloor(
    const FloorQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NeonFloorFloatWorkload, NullWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFullyConnected(
    const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonFullyConnectedWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGather(
    const armnn::GatherQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
{
    return std::make_unique<NeonGatherWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInput(
    const InputQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInstanceNormalization(
    const InstanceNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonInstanceNormalizationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateL2Normalization(
    const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NeonL2NormalizationFloatWorkload, NullWorkload>(descriptor, info,
                                                                              m_MemoryManager->GetIntraLayerManager());
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLogSoftmax(
    const LogSoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonLogSoftmaxWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLogicalBinary(
    const LogicalBinaryQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    switch(descriptor.m_Parameters.m_Operation)
    {
        case LogicalBinaryOperation::LogicalAnd:
            return std::make_unique<NeonLogicalAndWorkload>(descriptor, info);
        case LogicalBinaryOperation::LogicalOr:
            return std::make_unique<NeonLogicalOrWorkload>(descriptor, info);
        default:
            return nullptr;
    }
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLstm(
    const LstmQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NeonLstmFloatWorkload, NullWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMaximum(
    const MaximumQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonMaximumWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMean(
    const MeanQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonMeanWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMemCopy(
    const MemCopyQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
    {
        throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
    }
    return MakeWorkloadHelper<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMemImport(
    const MemImportQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
    {
        throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemImport workload");
    }
    return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMinimum(
    const MinimumQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonMinimumWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMultiplication(
    const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonMultiplicationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateNormalization(
    const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NeonNormalizationFloatWorkload, NullWorkload>(descriptor, info,
                                                                            m_MemoryManager->GetIntraLayerManager());
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateOutput(
    const OutputQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePad(
    const PadQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonPadWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePermute(
    const PermuteQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonPermuteWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePooling2d(
    const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonPooling2dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePreCompiled(
    const PreCompiledQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePrelu(
    const armnn::PreluQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
{
    return std::make_unique<NeonPreluWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateQLstm(
    const QLstmQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonQLstmWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateQuantize(
    const QuantizeQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonQuantizeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateQuantizedLstm(
    const QuantizedLstmQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonQuantizedLstmWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateRank(
    const RankQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonRankWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReduce(
    const ReduceQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonReduceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReshape(
    const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonReshapeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateResize(
    const ResizeQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonResizeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSlice(
    const SliceQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonSliceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSoftmax(
    const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonSoftmaxWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSpaceToBatchNd(
    const SpaceToBatchNdQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonSpaceToBatchNdWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSpaceToDepth(
    const SpaceToDepthQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonSpaceToDepthWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSplitter(
    const SplitterQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonSplitterWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateStack(
    const StackQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonStackWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateStridedSlice(
    const StridedSliceQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonStridedSliceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSubtraction(
    const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonSubtractionWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateTranspose(
    const TransposeQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonTransposeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateTransposeConvolution2d(
    const TransposeConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return std::make_unique<NeonTransposeConvolution2dWorkload>(descriptor, info,
                                                                m_MemoryManager->GetIntraLayerManager());
}

} // namespace armnn
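// Illustrative usage sketch only (not part of the factory): how a caller might create tensor
// handles and a workload through this factory. Variable names are hypothetical; the descriptor,
// TensorInfo and WorkloadInfo types are the standard Arm NN ones, and tensor memory would still
// need to be managed/allocated before the workload is executed.
//
//     armnn::NeonWorkloadFactory factory(std::make_shared<armnn::NeonMemoryManager>());
//
//     armnn::TensorInfo tensorInfo({ 1, 16 }, armnn::DataType::Float32);
//     auto input  = factory.CreateTensorHandle(tensorInfo);
//     auto output = factory.CreateTensorHandle(tensorInfo);
//
//     armnn::ActivationQueueDescriptor descriptor;
//     descriptor.m_Parameters.m_Function = armnn::ActivationFunction::ReLu;
//     descriptor.m_Inputs  = { input.get() };
//     descriptor.m_Outputs = { output.get() };
//
//     armnn::WorkloadInfo info;
//     info.m_InputTensorInfos  = { tensorInfo };
//     info.m_OutputTensorInfos = { tensorInfo };
//
//     auto workload = factory.CreateWorkload(armnn::LayerType::Activation, descriptor, info);
//     if (workload) { workload->Execute(); }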