author    Colm Donelan <Colm.Donelan@arm.com>          2019-08-15 16:03:17 +0100
committer Áron Virginás-Tar <aron.virginas-tar@arm.com> 2019-08-16 15:23:24 +0000
commit    c3c5fc259c0bfcab9f0096b7b68fc13b0937296a (patch)
tree      0a2e54b66828bc788186d13b006945b14dcae8fc
parent    dba634fd6a66a9e033a1925b0b26c80b270bbf21 (diff)
download  armnn-c3c5fc259c0bfcab9f0096b7b68fc13b0937296a.tar.gz
IVGCVSW-3620 Fix Hal 1.2 Softmax test failures on GpuAcc and CpuAcc
The following NeuralNetworkTests tests were failing on GpuAcc and CpuAcc:

    GeneratedTests.softmax_v1_2_relaxed
    GeneratedTests.softmax_v1_2_quant8
    GeneratedTests.softmax_v1_2_2
    GeneratedTests.softmax_v1_2_relaxed_2
    GeneratedTests.softmax_v1_2_quant8_2

The default value for the Softmax axis parameter is -1 in Android but 1 in ACL.
Detect the Android default in ArmComputeUtils.ComputeSoftmaxAclAxis and map it
to the ACL default.

Signed-off-by: Colm Donelan <Colm.Donelan@arm.com>
Change-Id: Ibb0660e4cb0dc6bd4c804c4397fbd61f38acdd9c
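For reference, a minimal standalone sketch of the translation this patch
introduces is below. SoftmaxDescriptor and TensorInfo are simplified stand-ins
for the real ArmNN types, and the final dim-based fallback is an assumption for
illustration (the ArmComputeUtils.hpp hunk below only shows the helper up to
its assertion); only the -1 -> 1 mapping is taken from the patch itself.

    #include <cassert>

    struct SoftmaxDescriptor
    {
        int m_Axis = -1; // Android NNAPI leaves the softmax axis at -1 by default.
    };

    struct TensorInfo // simplified stand-in for armnn::TensorInfo
    {
        unsigned int m_NumDimensions;
        unsigned int GetNumDimensions() const { return m_NumDimensions; }
    };

    unsigned int ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc,
                                       const TensorInfo& tensor)
    {
        // Detect the Android default of -1 and substitute the ACL default
        // of 1, as the ArmComputeUtils.hpp hunk below does.
        if (softmaxDesc.m_Axis == -1)
        {
            return 1;
        }

        unsigned int dim = tensor.GetNumDimensions();
        assert(dim != 0);
        return dim - 1; // Assumed fallback for this sketch; not shown in the hunk.
    }

    int main()
    {
        SoftmaxDescriptor desc; // m_Axis == -1, the Android default
        TensorInfo info{4};     // e.g. a 4D NHWC activation tensor
        assert(ComputeSoftmaxAclAxis(desc, info) == 1);
        return 0;
    }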
-rw-r--r--  src/backends/aclCommon/ArmComputeUtils.hpp               | 10
-rw-r--r--  src/backends/cl/workloads/ClSoftmaxBaseWorkload.cpp      |  2
-rw-r--r--  src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp     |  2
-rw-r--r--  src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp     |  2
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp  |  2
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp |  2
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp |  2
7 files changed, 14 insertions(+), 8 deletions(-)
diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index 0f56160051..4d690901c6 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -135,9 +135,15 @@ inline arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPol
     }
 }
 
-inline unsigned int ComputeSoftmaxAclAxis(const armnn::TensorInfo& tensor)
+inline unsigned int ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc, const armnn::TensorInfo& tensor)
 {
-    unsigned int dim = tensor.GetNumDimensions();
+    // Detect the Android default value of -1 and return the ACL default value of 1.
+    if (softmaxDesc.m_Axis == -1)
+    {
+        return 1;
+    }
+
+    unsigned int dim = tensor.GetNumDimensions();
     BOOST_ASSERT(dim != 0);
diff --git a/src/backends/cl/workloads/ClSoftmaxBaseWorkload.cpp b/src/backends/cl/workloads/ClSoftmaxBaseWorkload.cpp
index 2f6d380f94..a355ba0c2d 100644
--- a/src/backends/cl/workloads/ClSoftmaxBaseWorkload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxBaseWorkload.cpp
@@ -20,7 +20,7 @@ arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo& input,
     const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
     const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
 
-    unsigned int aclAxis = ComputeSoftmaxAclAxis(input);
+    unsigned int aclAxis = ComputeSoftmaxAclAxis(descriptor, input);
 
     return arm_compute::CLSoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
 }
diff --git a/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp b/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
index f2f8d17901..adb4872b80 100644
--- a/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxFloatWorkload.cpp
@@ -23,7 +23,7 @@ ClSoftmaxFloatWorkload::ClSoftmaxFloatWorkload(const SoftmaxQueueDescriptor& des
     arm_compute::ICLTensor& input = static_cast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
+    unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
     m_SoftmaxLayer.configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
 }
diff --git a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
index ce2a9e6356..f14ea11c82 100644
--- a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
@@ -33,7 +33,7 @@ ClSoftmaxUint8Workload::ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& des
"Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
}
- unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
+ unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
m_SoftmaxLayer.configure(&input, &output, descriptor.m_Parameters.m_Beta, aclAxis);
}
diff --git a/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
index 8acb775344..41ebfb9fd3 100644
--- a/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxBaseWorkload.cpp
@@ -20,7 +20,7 @@ arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
     const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
     const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
 
-    unsigned int aclAxis = ComputeSoftmaxAclAxis(input);
+    unsigned int aclAxis = ComputeSoftmaxAclAxis(descriptor, input);
 
     return arm_compute::NESoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
 }
diff --git a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
index 4dc913134c..152d19cc04 100644
--- a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
@@ -24,7 +24,7 @@ NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor&
     arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
-    unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
+    unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
     layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
     m_SoftmaxLayer.reset(layer.release());
 }
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
index 8e83914dcb..15a7066861 100644
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
@@ -34,7 +34,7 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor&
     }
 
     auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
-    unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
+    unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
     layer->configure(&input, &output, descriptor.m_Parameters.m_Beta, aclAxis);
     m_SoftmaxLayer.reset(layer.release());
 }
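The previously failing cases can be re-checked by re-running the
GeneratedTests.softmax_v1_2_* tests listed in the commit message, for example
via the platform's NNAPI test binary with a gtest filter such as
--gtest_filter=GeneratedTests.softmax_v1_2_* (the binary name and invocation
depend on the Android build; this is an assumed workflow, not part of the
patch).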