//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//
#include "WorkloadFactory.hpp"
#include "RefWorkloadFactory.hpp"
#include "NeonWorkloadFactory.hpp"
#include "ClWorkloadFactory.hpp"

#include "armnn/Types.hpp"
#include "armnn/LayerSupport.hpp"
#include "Layer.hpp"
#include "LayersFwd.hpp"
#include "CpuTensorHandle.hpp"

#include <boost/cast.hpp>
#include <boost/iterator/transform_iterator.hpp>

#include <cstring>

namespace armnn
{

bool IWorkloadFactory::IsLayerSupported(Compute compute, const Layer& layer, DataType dataType,
    std::string& outReasonIfUnsupported)
{
    constexpr size_t reasonCapacity = 1024;
    char reason[reasonCapacity];
    bool result;

    switch (layer.GetType())
    {
        case LayerType::Activation:
        {
            auto cLayer = boost::polymorphic_downcast<const ActivationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = IsActivationSupported(compute, input, cLayer->GetParameters(), reason, reasonCapacity);
            break;
        }
        case LayerType::Addition:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = IsAdditionSupported(compute, input0, input1, output, reason, reasonCapacity);
            break;
        }
        case LayerType::BatchNormalization:
        {
            auto cLayer = boost::polymorphic_downcast<const BatchNormalizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = IsBatchNormalizationSupported(compute, input, cLayer->GetParameters(), reason, reasonCapacity);
            break;
        }
        case LayerType::Constant:
        {
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = IsConstantSupported(compute, output, reason, reasonCapacity);
            break;
        }
        case LayerType::Convolution2d:
        {
            auto cLayer = boost::polymorphic_downcast<const Convolution2dLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);

            const TensorInfo* biasInfo = nullptr;
            static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
            static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);

            const Convolution2dDescriptor& descriptor = cLayer->GetParameters();

            if (descriptor.m_BiasEnabled)
            {
                BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
                biasInfo = &(cLayer->m_Bias->GetTensorInfo());
            }
            else
            {
                // If biases are not enabled, pass a dummy TensorInfo of the matching data type to the validation.
                switch (input.GetDataType())
                {
                    case DataType::Float32:
                    {
                        biasInfo = &dummyFloat32Bias;
                        break;
                    }
                    case DataType::QuantisedAsymm8:
                    {
                        biasInfo = &dummyQA8Bias;
                        break;
                    }
                    default:
                    {
                        BOOST_ASSERT_MSG(false, "Unexpected input type");
                    }
                }
            }

            result = IsConvolution2dSupported(compute, input, output, descriptor,
                cLayer->m_Weight->GetTensorInfo(), *biasInfo, reason, reasonCapacity);
            break;
        }
        case LayerType::MemCopy:
        {
            // MemCopy is supported on the CpuRef, CpuAcc and GpuAcc backends
            // (Undefined is also treated as CpuRef to avoid breaking many unit tests).
            result = compute == Compute::CpuRef || compute == Compute::Undefined
                || compute == Compute::CpuAcc || compute == Compute::GpuAcc;
            strcpy(reason, "Unsupported backend type");
            break;
        }
        case LayerType::DepthwiseConvolution2d:
        {
            auto cLayer = boost::polymorphic_downcast<const DepthwiseConvolution2dLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = IsDepthwiseConvolutionSupported(compute, input, cLayer->GetParameters(),
                cLayer->m_Weight->GetTensorInfo(), reason, reasonCapacity);
            break;
        }
        case LayerType::FakeQuantization:
        {
            auto cLayer = boost::polymorphic_downcast<const FakeQuantizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = IsFakeQuantizationSupported(compute, input, cLayer->GetParameters(), reason, reasonCapacity);
            break;
        }
        case LayerType::Floor:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = IsFloorSupported(compute, input, output, reason, reasonCapacity);
            break;
        }
        case LayerType::FullyConnected:
        {
            auto cLayer = boost::polymorphic_downcast<const FullyConnectedLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = IsFullyConnectedSupported(compute, input, cLayer->GetParameters(), reason, reasonCapacity);
            break;
        }
        case LayerType::Input:
        {
            const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
            result = IsInputSupported(compute, input, reason, reasonCapacity);
            break;
        }
        case LayerType::L2Normalization:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = IsL2NormalizationSupported(compute, input, reason, reasonCapacity);
            break;
        }
        case LayerType::Merger:
        {
            auto cLayer = boost::polymorphic_downcast<const MergerLayer*>(&layer);

            // Get a vector of the TensorInfos of all inputs.
            auto getTensorInfo = [](const InputSlot& slot)
                {
                    return &slot.GetConnectedOutputSlot()->GetTensorInfo();
                };
            auto begin = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
            auto end = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);

            std::vector<const TensorInfo*> inputs(begin, end);

            result = IsMergerSupported(compute, inputs, cLayer->GetParameters(), reason, reasonCapacity);
            break;
        }
        case LayerType::Multiplication:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            result = IsMultiplicationSupported(compute, input0, input1, reason, reasonCapacity);
            break;
        }
        case LayerType::Normalization:
        {
            auto cLayer = boost::polymorphic_downcast<const NormalizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = IsNormalizationSupported(compute, input, output, cLayer->GetParameters(), reason,
                reasonCapacity);
            break;
        }
        case LayerType::Output:
        {
            const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = IsOutputSupported(compute, output, reason, reasonCapacity);
            break;
        }
        case LayerType::Permute:
        {
            auto cLayer = boost::polymorphic_downcast<const PermuteLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = IsPermuteSupported(compute, input, output, cLayer->GetParameters(), reason, reasonCapacity);
            break;
        }
        case LayerType::Pooling2d:
        {
            auto cLayer = boost::polymorphic_downcast<const Pooling2dLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = IsPooling2dSupported(compute, input, output, cLayer->GetParameters(), reason, reasonCapacity);
            break;
        }
        case LayerType::Reshape:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = IsReshapeSupported(compute, input, reason, reasonCapacity);
            break;
        }
        case LayerType::ResizeBilinear:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = IsResizeBilinearSupported(compute, input, reason, reasonCapacity);
            break;
        }
        case LayerType::Softmax:
        {
            auto cLayer = boost::polymorphic_downcast<const SoftmaxLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = IsSoftmaxSupported(compute, input, cLayer->GetParameters(), reason, reasonCapacity);
            break;
        }
        case LayerType::Splitter:
        {
            auto cLayer = boost::polymorphic_downcast<const SplitterLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = IsSplitterSupported(compute, input, cLayer->GetParameters(), reason, reasonCapacity);
            break;
        }
        default:
        {
            BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
            strcpy(reason, "Unrecognised layer type");
            result = false;
            break;
        }
    }

    outReasonIfUnsupported = reason;
    return result;
}

bool IWorkloadFactory::IsLayerSupported(const Layer& layer, DataType dataType, std::string& outReasonIfUnsupported)
{
    return IsLayerSupported(layer.GetComputeDevice(), layer, dataType, outReasonIfUnsupported);
}

}
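
// ----------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original file. It assumes
// WorkloadFactory.hpp declares the IsLayerSupported overloads as static
// members, and `layer` stands in for a hypothetical Layer reference (Layer
// objects are normally owned by a Graph).
//
//     std::string reason;
//     bool supported = armnn::IWorkloadFactory::IsLayerSupported(
//         armnn::Compute::CpuAcc, layer, armnn::DataType::Float32, reason);
//     if (!supported)
//     {
//         // `reason` holds the message written into the fixed-size buffer above.
//         std::cerr << "CpuAcc rejected layer: " << reason << std::endl;
//     }
// ----------------------------------------------------------------------------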