author     Teresa Charlin <teresa.charlinreyes@arm.com>         2020-08-28 15:13:05 +0100
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2020-09-03 15:33:57 +0000
commit     e386676035e56a8354c4c5fc1be5f070f8e666e4 (patch)
tree       27155c77508d865dacc1adcacb007a1f98e6e9f8
parent     9e9c7f5067c287b9989ee49d34825809d7f611dd (diff)
Update ACL pin to ec4dee8c68a3d0f6d63db184bfb2f4589429778e
* Axis for LogSoftMax and SoftMax can be either positive or negative

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I36b0507ad7600c0a98c3b8be3c0350045ee05b84
Signed-off-by: Nikhil Raj <nikhil.raj@arm.com>
-rwxr-xr-x  scripts/get_compute_library.sh                          |  4
-rw-r--r--  src/backends/cl/workloads/ClLogSoftmaxWorkload.cpp      |  6
-rw-r--r--  src/backends/cl/workloads/ClSoftmaxWorkload.cpp         |  6
-rw-r--r--  src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp  | 10
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxWorkload.cpp     | 10
5 files changed, 14 insertions(+), 22 deletions(-)
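Before the per-file diffs, a note on what changes in the workloads: ComputeAclAxis converts an Arm NN axis to the corresponding ACL axis, and the removed ComputePositiveAxis step then wrapped that signed value into an unsigned one. With the updated ACL pin, the softmax layers accept the signed axis directly. A minimal sketch of the removed normalization, inferred from the diff rather than taken from the Arm NN source (the exact range checks are assumptions):

#include <cassert>

// Illustrative reimplementation, inferred from the diff: ComputePositiveAxis
// wrapped a signed axis in [-rank, rank) into an unsigned axis in [0, rank).
unsigned int ComputePositiveAxisSketch(int axis, int rank)
{
    assert(rank > 0);
    assert(axis >= -rank && axis < rank);
    return static_cast<unsigned int>(axis < 0 ? axis + rank : axis);
}

For example, on a 4D tensor an axis of -1 previously had to be wrapped to 3 before being handed to ACL; after this pin update, the int returned by ComputeAclAxis is passed through unchanged.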
diff --git a/scripts/get_compute_library.sh b/scripts/get_compute_library.sh
index 8ea0360867..02e2a0ddf0 100755
--- a/scripts/get_compute_library.sh
+++ b/scripts/get_compute_library.sh
@@ -7,10 +7,10 @@
CMD=$( basename $0 )
# For pinning to a ref use this:
-DEFAULT_CLFRAMEWORKREVISION="branches/arm_compute_20_08" # Release 20.08
+#DEFAULT_CLFRAMEWORKREVISION="branches/arm_compute_20_08" # Release 20.08
#
# For pinning to a revision use this:
-#DEFAULT_CLFRAMEWORKREVISION="547b2e7aa07db4dd41f99e492c40710f2548c6ba" #COMPMID-3702: Update documentation
+DEFAULT_CLFRAMEWORKREVISION="ec4dee8c68a3d0f6d63db184bfb2f4589429778e" #COMPMID-3750: Disable asm kernels when shifts are negative.
usage() {
echo "Usage: $CMD (Use the default clframework SHA)"
diff --git a/src/backends/cl/workloads/ClLogSoftmaxWorkload.cpp b/src/backends/cl/workloads/ClLogSoftmaxWorkload.cpp
index c62c9d24f6..6d53523291 100644
--- a/src/backends/cl/workloads/ClLogSoftmaxWorkload.cpp
+++ b/src/backends/cl/workloads/ClLogSoftmaxWorkload.cpp
@@ -21,8 +21,7 @@ arm_compute::Status ClLogSoftmaxWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
- int aclAxis_int = ComputeAclAxis(descriptor.m_Axis, input);
- unsigned int aclAxis = ComputePositiveAxis(aclAxis_int, input);
+ int aclAxis = ComputeAclAxis(descriptor.m_Axis, input);
return arm_compute::CLLogSoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
}
@@ -36,8 +35,7 @@ ClLogSoftmaxWorkload::ClLogSoftmaxWorkload(const LogSoftmaxQueueDescriptor& desc
arm_compute::ICLTensor& input = static_cast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
- int aclAxis_int = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
- unsigned int aclAxis = ComputePositiveAxis(aclAxis_int, info.m_InputTensorInfos[0]);
+ int aclAxis = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
m_LogSoftmaxLayer.configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
}
diff --git a/src/backends/cl/workloads/ClSoftmaxWorkload.cpp b/src/backends/cl/workloads/ClSoftmaxWorkload.cpp
index 37746c83a6..8bc2a765ed 100644
--- a/src/backends/cl/workloads/ClSoftmaxWorkload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxWorkload.cpp
@@ -21,8 +21,7 @@ arm_compute::Status ClSoftmaxWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
- int aclAxis_int = ComputeAclAxis(descriptor.m_Axis, input);
- unsigned int aclAxis = ComputePositiveAxis(aclAxis_int, input);
+ int aclAxis = ComputeAclAxis(descriptor.m_Axis, input);
return arm_compute::CLSoftmaxLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_Beta, aclAxis);
}
@@ -36,8 +35,7 @@ ClSoftmaxWorkload::ClSoftmaxWorkload(const SoftmaxQueueDescriptor& descriptor, c
arm_compute::ICLTensor& input = static_cast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
- int aclAxis_int = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
- unsigned int aclAxis = ComputePositiveAxis(aclAxis_int, info.m_InputTensorInfos[0]);
+ int aclAxis = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
m_SoftmaxLayer.configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
}
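For context, a hypothetical caller-side sketch of the validate path above with a negative axis. The shapes, the helper name, and the setup are illustrative, not taken from the Arm NN test suite; it assumes the armnn headers and the ClSoftmaxWorkload declaration are available:

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <iostream>

bool IsNegativeAxisSoftmaxSupported()
{
    armnn::TensorInfo input({2, 4}, armnn::DataType::Float32);
    armnn::TensorInfo output({2, 4}, armnn::DataType::Float32);

    armnn::SoftmaxDescriptor descriptor;
    descriptor.m_Beta = 1.0f;
    descriptor.m_Axis = -1; // last dimension; previously wrapped to a positive axis first

    arm_compute::Status status = armnn::ClSoftmaxWorkloadValidate(input, output, descriptor);
    if (!status) // arm_compute::Status converts to false when validation fails
    {
        std::cerr << status.error_description() << std::endl;
    }
    return static_cast<bool>(status);
}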
diff --git a/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp b/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp
index 39ff279728..ba5c9000f4 100644
--- a/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLogSoftmaxWorkload.cpp
@@ -23,12 +23,11 @@ arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
- int aclAxis_int = ComputeAclAxis(descriptor.m_Axis, input);
- unsigned int aclAxis = ComputePositiveAxis(aclAxis_int, input);
+ int aclAxis = ComputeAclAxis(descriptor.m_Axis, input);
return arm_compute::NELogSoftmaxLayer::validate(&aclInputInfo,
&aclOutputInfo,
descriptor.m_Beta,
- static_cast<int>(aclAxis));
+ aclAxis);
}
NeonLogSoftmaxWorkload::NeonLogSoftmaxWorkload(const LogSoftmaxQueueDescriptor& descriptor,
@@ -42,9 +41,8 @@ NeonLogSoftmaxWorkload::NeonLogSoftmaxWorkload(const LogSoftmaxQueueDescriptor&
arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NELogSoftmaxLayer>(memoryManager);
- int aclAxis_int = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
- unsigned int aclAxis = ComputePositiveAxis(aclAxis_int, info.m_InputTensorInfos[0]);
- layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, static_cast<int>(aclAxis));
+ int aclAxis = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
+ layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
m_LogSoftmaxLayer.reset(layer.release());
}
diff --git a/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
index 7e1501fa3e..505844e24a 100644
--- a/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxWorkload.cpp
@@ -23,12 +23,11 @@ arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
- int aclAxis_int = ComputeAclAxis(descriptor.m_Axis, input);
- unsigned int aclAxis = ComputePositiveAxis(aclAxis_int, input);
+ int aclAxis = ComputeAclAxis(descriptor.m_Axis, input);
return arm_compute::NESoftmaxLayer::validate(&aclInputInfo,
&aclOutputInfo,
descriptor.m_Beta,
- static_cast<int>(aclAxis));
+ aclAxis);
}
NeonSoftmaxWorkload::NeonSoftmaxWorkload(const SoftmaxQueueDescriptor& descriptor,
@@ -42,9 +41,8 @@ NeonSoftmaxWorkload::NeonSoftmaxWorkload(const SoftmaxQueueDescriptor& descripto
arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
- int aclAxis_int = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
- unsigned int aclAxis = ComputePositiveAxis(aclAxis_int, info.m_InputTensorInfos[0]);
- layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, static_cast<int>(aclAxis));
+ int aclAxis = ComputeAclAxis(m_Data.m_Parameters.m_Axis, info.m_InputTensorInfos[0]);
+ layer->configure(&input, &output, m_Data.m_Parameters.m_Beta, aclAxis);
m_SoftmaxLayer.reset(layer.release());
}
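A closing note on the constructor pattern in both Neon workloads: the concrete layer is built with std::make_unique, configured, and then handed to the member pointer via reset(layer.release()). A standalone sketch with stand-in types (it assumes the member holds the layer through a base interface such as arm_compute::IFunction, which is not shown in this diff):

#include <memory>

// Stand-in types; the real code uses arm_compute::IFunction and
// arm_compute::NESoftmaxLayer.
struct IFunction { virtual ~IFunction() = default; virtual void run() = 0; };
struct ConcreteLayer : IFunction { void run() override { /* kernel dispatch */ } };

int main()
{
    // Build and configure the concrete layer first...
    auto layer = std::make_unique<ConcreteLayer>();

    // ...then transfer ownership to a base-class pointer in one step:
    // release() relinquishes the raw pointer and reset() adopts it.
    std::unique_ptr<IFunction> m_SoftmaxLayer;
    m_SoftmaxLayer.reset(layer.release());

    m_SoftmaxLayer->run();
    return 0;
}

m_SoftmaxLayer = std::move(layer); would perform the same transfer, since std::unique_ptr converts implicitly from derived to base.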