From efc5da416a2f93e52bce60b225cc70eba028a5c2 Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Wed, 13 May 2020 15:16:12 +0100
Subject: Update ACL pin to 4efe5dc9b39a87eface43e7468e08279976ae9ef

 * change neon softmax axis to be int

Signed-off-by: Teresa Charlin
Change-Id: I7a9486c11494ed4993abd683a50d6b4483f283e1
---
 scripts/get_compute_library.sh                      | 2 +-
 src/backends/neon/workloads/NeonSoftmaxWorkload.cpp | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/scripts/get_compute_library.sh b/scripts/get_compute_library.sh
index 08218e4ce6..7c3299eb51 100755
--- a/scripts/get_compute_library.sh
+++ b/scripts/get_compute_library.sh
@@ -10,7 +10,7 @@ CMD=$( basename $0 )
 #DEFAULT_CLFRAMEWORKREVISION="branches/arm_compute_20_02" # Release 20.02
 #
 # For pinning to a revision use this:
-DEFAULT_CLFRAMEWORKREVISION="6f8b17dedb7b53b550e6210fd1c78c3a3e086271" #[ONCPUML-7] arm_compute support for ND parallelism.
+DEFAULT_CLFRAMEWORKREVISION="4efe5dc9b39a87eface43e7468e08279976ae9ef" # COMPMID-3484: Regression in Transpose convolution Android R CTS test.
 
 usage() {
   echo "Usage: $CMD (Use the default clframework SHA)"
diff --git a/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
index 35e2e0e3fc..b36bf7695f 100644
--- a/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
@@ -23,7 +23,7 @@ arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
     const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
     const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
 
-    unsigned int aclAxis = ComputeSoftmaxAclAxis(descriptor, input);
+    int aclAxis = ComputeSoftmaxAclAxis(descriptor, input);
     return arm_compute::NESoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
 }
 
@@ -38,7 +38,7 @@ NeonSoftmaxWorkload::NeonSoftmaxWorkload(const SoftmaxQueueDescriptor& descripto
     arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
-    unsigned int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
+    int aclAxis = ComputeSoftmaxAclAxis(m_Data.m_Parameters, info.m_InputTensorInfos[0]);
     layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
     m_SoftmaxLayer.reset(layer.release());
 }
-- 
cgit v1.2.1
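
Note on the axis change: the pinned Compute Library revision takes the softmax axis as a signed int, which is why aclAxis switches from unsigned int to int; a signed axis lets negative values refer to dimensions counted from the innermost one. Below is a minimal sketch of that kind of signed-axis mapping, assuming a descriptor whose m_Axis may be negative. ComputeSoftmaxAclAxis is a real Arm NN helper, but this body and the name ComputeSoftmaxAclAxisSketch are illustrative assumptions, not the actual implementation.

// Illustrative sketch only (not the Arm NN implementation): shows the kind of
// signed-axis normalisation that a signed aclAxis makes possible.
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

int ComputeSoftmaxAclAxisSketch(const armnn::SoftmaxDescriptor& descriptor,
                                const armnn::TensorInfo& tensor)
{
    const int rank = static_cast<int>(tensor.GetNumDimensions());
    int axis = descriptor.m_Axis;      // SoftmaxDescriptor::m_Axis is signed; -1 means the last dimension
    if (axis < 0)
    {
        axis += rank;                  // normalise a negative axis to its positive equivalent
    }
    // ACL orders dimensions with index 0 as the innermost, the reverse of Arm NN,
    // so mirror the normalised Arm NN axis into ACL's coordinate system.
    return rank - 1 - axis;
}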