From 3b9383548309a8f9121668826a628ec250c57a1c Mon Sep 17 00:00:00 2001
From: Francis Murtagh
Date: Fri, 26 Jul 2019 15:44:17 +0100
Subject: IVGCVSW-3554 Update workloads to pass Softmax Axis Parameter to Backends

* Add check in CL and Neon to ensure axis is 1 otherwise return unsupported.
* Edit CreateWorkload test and JsonPrinter test to ensure axis of 1.

Change-Id: I499b405532e26fefc2dd1c18b6dc6005813b5604
Signed-off-by: Francis Murtagh
---
 src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp  | 4 +++-
 src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp | 2 +-
 src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp | 3 +--
 3 files changed, 5 insertions(+), 4 deletions(-)

(limited to 'src/backends/neon/workloads')

diff --git a/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
index b229bc48a2..8acb775344 100644
--- a/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
@@ -6,6 +6,7 @@
 #include "NeonSoftmaxBaseWorkload.hpp"

 #include
+#include

 #include

@@ -19,7 +20,8 @@ arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
     const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
     const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);

-    return arm_compute::NESoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta);
+    unsigned int aclAxis = ComputeSoftmaxAclAxis(input);
+    return arm_compute::NESoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
 }

 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
index 19c50db15b..4dc913134c 100644
--- a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
@@ -23,8 +23,8 @@ NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor&
     arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor();

-    unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
     auto layer = std::make_unique(memoryManager);
+    unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
     layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
     m_SoftmaxLayer.reset(layer.release());
 }
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
index 363c1502f5..8e83914dcb 100644
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
@@ -33,9 +33,8 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor&
             "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
     }

-    unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
-
     auto layer = std::make_unique(memoryManager);
+    unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
     layer->configure(&input, &output, descriptor.m_Parameters.m_Beta, aclAxis);
     m_SoftmaxLayer.reset(layer.release());
 }
--
cgit v1.2.1
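
Note (not part of the patch): the hunks above call ComputeSoftmaxAclAxis to translate the ArmNN softmax axis into the dimension index arm_compute expects, and the commit message says CL and Neon now reject any axis other than 1, but neither piece of code appears in this limited diff. The following is only a minimal sketch of what those two pieces might look like; the helper names, the m_Axis member access, and the include paths are assumptions, not quotations from the repository.

    // Illustration only -- a sketch, not the upstream implementation.
    #include <armnn/Descriptors.hpp>
    #include <armnn/Tensor.hpp>

    // arm_compute indexes dimensions starting from the fastest-changing one,
    // so a rank-N ArmNN tensor with softmax on axis 1 plausibly maps to ACL
    // axis (N - 1) for the flattened layout the NEON/CL softmax layers use.
    unsigned int ComputeSoftmaxAclAxisSketch(const armnn::TensorInfo& tensor)
    {
        unsigned int numDimensions = tensor.GetNumDimensions();
        return numDimensions - 1;
    }

    // Hypothetical layer-support check matching the commit message: only
    // axis 1 is accepted, anything else is reported as unsupported.
    bool IsSoftmaxAxisSupportedSketch(const armnn::SoftmaxDescriptor& descriptor)
    {
        return descriptor.m_Axis == 1;
    }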