From 3b9383548309a8f9121668826a628ec250c57a1c Mon Sep 17 00:00:00 2001
From: Francis Murtagh <francis.murtagh@arm.com>
Date: Fri, 26 Jul 2019 15:44:17 +0100
Subject: IVGCVSW-3554 Update workloads to pass Softmax Axis Parameter to
 Backends

* Add check in CL and Neon to ensure axis is 1 otherwise return unsupported.
* Edit CreateWorkload test and JsonPrinter test to ensure axis of 1.

Change-Id: I499b405532e26fefc2dd1c18b6dc6005813b5604
Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
---
 src/armnn/test/CreateWorkload.hpp                        | 7 ++++++-
 src/armnnCaffeParser/CaffeParser.cpp                     | 1 +
 src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp | 8 +++++++-
 src/backends/cl/ClLayerSupport.cpp                       | 8 +++++++-
 src/backends/cl/workloads/ClSoftmaxBaseWorkload.cpp      | 7 +++++--
 src/backends/cl/workloads/ClSoftmaxBaseWorkload.hpp      | 4 +++-
 src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp     | 2 +-
 src/backends/neon/NeonLayerSupport.cpp                   | 6 ++++++
 src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp  | 4 +++-
 src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp | 2 +-
 src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp | 3 +--
 11 files changed, 41 insertions(+), 11 deletions(-)

diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 4181db26d0..619c6d02cd 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -617,8 +617,13 @@ std::unique_ptr<SoftmaxWorkload> CreateSoftmaxWorkloadTest(armnn::IWorkloadFacto
 {
     // Create the layer we're testing.
     SoftmaxDescriptor softmaxDescriptor;
-    Layer* const layer = graph.AddLayer<SoftmaxLayer>(softmaxDescriptor, "layer");
 
+    // Set Axis to 1 if CL or Neon until further Axes are supported.
+    if (factory.GetBackendId() == armnn::Compute::CpuAcc || factory.GetBackendId() == armnn::Compute::GpuAcc)
+    {
+        softmaxDescriptor.m_Axis = 1;
+    }
+    Layer* const layer = graph.AddLayer<SoftmaxLayer>(softmaxDescriptor, "layer");
     // Create extra layers.
     Layer* const input = graph.AddLayer<InputLayer>(0, "input");
     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
diff --git a/src/armnnCaffeParser/CaffeParser.cpp b/src/armnnCaffeParser/CaffeParser.cpp
index 4027e9481b..c73365bae1 100644
--- a/src/armnnCaffeParser/CaffeParser.cpp
+++ b/src/armnnCaffeParser/CaffeParser.cpp
@@ -1220,6 +1220,7 @@ void CaffeParserBase::ParseSoftmaxLayer(const LayerParameter& layerParam)
 
     // Engine
     armnn::SoftmaxDescriptor softmaxDescriptor;
+    softmaxDescriptor.m_Axis = 1;
     armnn::IConnectableLayer* const softmaxLayer = m_Network->AddSoftmaxLayer(
         softmaxDescriptor,
         layerParam.name().c_str());
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
index 4591def34f..c7870d9672 100644
--- a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
@@ -134,7 +134,13 @@ std::string GetSoftmaxProfilerJson(const std::vector<armnn::BackendId>& backends
     INetworkPtr net(INetwork::Create());
 
     IConnectableLayer* input = net->AddInputLayer(0, "input");
-    IConnectableLayer* softmax = net->AddSoftmaxLayer(SoftmaxDescriptor(), "softmax");
+    SoftmaxDescriptor softmaxDescriptor;
+    // Set Axis to 1 if CL or Neon until further Axes are supported.
+    if ( backends.front() == armnn::Compute::CpuAcc || backends.front() == armnn::Compute::GpuAcc)
+    {
+        softmaxDescriptor.m_Axis = 1;
+    }
+    IConnectableLayer* softmax = net->AddSoftmaxLayer(softmaxDescriptor, "softmax");
     IConnectableLayer* output = net->AddOutputLayer(0, "output");
 
     input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 7eb1dcf39a..b737daf7f4 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -594,8 +594,14 @@ bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                         const SoftmaxDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
+    if (!(descriptor.m_Axis == 1 ||
+          (descriptor.m_Axis < 0 && static_cast<int>(input.GetNumDimensions()) + descriptor.m_Axis == 1)))
+    {
+        SetValueChecked(reasonIfUnsupported, "Cl Softmax: Only supports Axis equal to 1.");
+        return false;
+    }
     ignore_unused(descriptor);
-    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
 bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
diff --git a/src/backends/cl/workloads/ClSoftmaxBaseWorkload.cpp b/src/backends/cl/workloads/ClSoftmaxBaseWorkload.cpp
index b1dc404a6f..2f6d380f94 100644
--- a/src/backends/cl/workloads/ClSoftmaxBaseWorkload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxBaseWorkload.cpp
@@ -6,6 +6,7 @@
 #include "ClSoftmaxBaseWorkload.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
 
 #include <arm_compute/runtime/CL/functions/CLSoftmaxLayer.h>
 
@@ -13,12 +14,14 @@ namespace armnn
 {
 
 arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo& input,
-                                              const TensorInfo& output)
+                                              const TensorInfo& output,
+                                              const SoftmaxDescriptor& descriptor)
 {
     const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
     const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
 
-    return arm_compute::CLSoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo);
+    unsigned int aclAxis = ComputeSoftmaxAclAxis(input);
+    return arm_compute::CLSoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
 }
 
 }
diff --git a/src/backends/cl/workloads/ClSoftmaxBaseWorkload.hpp b/src/backends/cl/workloads/ClSoftmaxBaseWorkload.hpp
index b800056cdf..8d73060162 100644
--- a/src/backends/cl/workloads/ClSoftmaxBaseWorkload.hpp
+++ b/src/backends/cl/workloads/ClSoftmaxBaseWorkload.hpp
@@ -5,6 +5,7 @@
 
 #pragma once
 
+#include <armnn/Descriptors.hpp>
 #include <armnn/Tensor.hpp>
 #include <arm_compute/core/Error.h>
 
@@ -12,6 +13,7 @@ namespace armnn
 {
 
 arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo& input,
-                                              const TensorInfo& output);
+                                              const TensorInfo& output,
+                                              const SoftmaxDescriptor& descriptor);
 
 } // namespace armnn
diff --git a/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp b/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
index c78ab039ef..f2f8d17901 100644
--- a/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
@@ -14,7 +14,7 @@ namespace armnn
 {
 
 ClSoftmaxFloatWorkload::ClSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info,
-                                               std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
+                                               std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
     : FloatWorkload<SoftmaxQueueDescriptor>(descriptor, info)
     , m_SoftmaxLayer(memoryManager)
 {
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index cb709c315a..e856210503 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -538,6 +538,12 @@ bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                           const SoftmaxDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
+    if (!(descriptor.m_Axis == 1 ||
+          (descriptor.m_Axis < 0 && static_cast<int>(input.GetNumDimensions()) + descriptor.m_Axis == 1)))
+    {
+        SetValueChecked(reasonIfUnsupported, "Neon Softmax: Only supports Axis equal to 1.");
+        return false;
+    }
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
diff --git a/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
index b229bc48a2..8acb775344 100644
--- a/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
@@ -6,6 +6,7 @@
 #include "NeonSoftmaxBaseWorkload.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
 
 #include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
 
@@ -19,7 +20,8 @@ arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
     const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
     const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
 
-    return arm_compute::NESoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta);
+    unsigned int aclAxis = ComputeSoftmaxAclAxis(input);
+    return arm_compute::NESoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
 }
 
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
index 19c50db15b..4dc913134c 100644
--- a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
@@ -23,8 +23,8 @@ NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor&
     arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
     auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
+    unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
     layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
     m_SoftmaxLayer.reset(layer.release());
 }
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
index 363c1502f5..8e83914dcb 100644
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
@@ -33,9 +33,8 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor&
             "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
     }
 
-    unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
-
     auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
+    unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
     layer->configure(&input, &output, descriptor.m_Parameters.m_Beta, aclAxis);
     m_SoftmaxLayer.reset(layer.release());
 }
-- 
cgit v1.2.1
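
Note for readers of the patch: the two moving parts above are the axis gate added to
ClLayerSupport/NeonLayerSupport and the ComputeSoftmaxAclAxis helper the workloads call.
The following is a minimal, self-contained C++ sketch of that logic. IsSoftmaxAxisSupported
mirrors the check in the diff; ComputeSoftmaxAclAxisSketch is an assumed body, inferred from
how the workloads pass its result straight to NESoftmaxLayer/CLSoftmaxLayer, and is not
copied from the ArmNN tree.

#include <cassert>
#include <iostream>

// Mirror of the backend check: only an axis of 1, or a negative axis that
// resolves to dimension 1, is accepted; anything else is reported unsupported.
bool IsSoftmaxAxisSupported(int axis, unsigned int numDimensions)
{
    return axis == 1 ||
           (axis < 0 && static_cast<int>(numDimensions) + axis == 1);
}

// Assumed behaviour of ComputeSoftmaxAclAxis: ArmNN numbers dimensions
// outermost-first while ACL numbers them innermost-first, so axis 1 of an
// N-dimensional tensor maps to ACL axis N - 1.
unsigned int ComputeSoftmaxAclAxisSketch(unsigned int numDimensions)
{
    assert(numDimensions != 0);
    return numDimensions - 1;
}

int main()
{
    std::cout << IsSoftmaxAxisSupported(1, 4)   << '\n'; // 1: axis 1 accepted
    std::cout << IsSoftmaxAxisSupported(-3, 4)  << '\n'; // 1: 4 + (-3) == 1, accepted
    std::cout << IsSoftmaxAxisSupported(2, 4)   << '\n'; // 0: rejected by both backends
    std::cout << ComputeSoftmaxAclAxisSketch(4) << '\n'; // 3: ACL axis for a 4D tensor
}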