author      James Conroy <james.conroy@arm.com>                   2019-07-04 16:56:44 +0100
committer   Narumol Prangnawarat <narumol.prangnawarat@arm.com>   2019-07-05 14:41:43 +0000
commit      33fa0a66a57ddfd1896478301de7ee047aae5e89 (patch)
tree        a4fe4a13f625750e0c6d72a1b6164932db0d8cdb
parent      9b461486a863229bb71850f403099ec0713aef68 (diff)
download    armnn-33fa0a66a57ddfd1896478301de7ee047aae5e89.tar.gz
IVGCVSW-3401 Update ACL pin to latest master
* Updated ACL pin to latest master.
* Minor changes to Softmax Neon/CL uint8 workloads to reflect refactoring in ACL.

!android-nn-driver:1476

Change-Id: I1c5005ddbcccdb41d8cb09d3fa61cf3ce0e9ffdb
Signed-off-by: James Conroy <james.conroy@arm.com>
-rwxr-xr-x  scripts/get_compute_library.sh                             | 2
-rw-r--r--  src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp       | 7
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp   | 7
3 files changed, 7 insertions, 9 deletions
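
The workload edits below track a refactor in ACL where QuantizationInfo stopped exposing scale and offset as public members and instead provides scale() and offset() accessors that return vectors. The following is a minimal sketch of the new call pattern and of the check the workloads perform; the struct is a simplified stand-in, not the real arm_compute::QuantizationInfo, and all names other than scale()/offset() are assumptions for illustration.

    // Illustration only (not part of the commit): a simplified stand-in for
    // arm_compute::QuantizationInfo showing the member-to-accessor change
    // that the two workload diffs below adapt to.
    #include <cstdint>
    #include <vector>

    struct SketchQuantizationInfo
    {
        std::vector<float>   m_Scale;
        std::vector<int32_t> m_Offset;

        // After the ACL refactor, scale and offset are read through accessor
        // functions returning vectors rather than through public members.
        const std::vector<float>&   scale()  const { return m_Scale; }
        const std::vector<int32_t>& offset() const { return m_Offset; }
    };

    // Mirrors the check in the uint8 Softmax workloads: the output must be
    // quantized with scale = 1.0f / 256.0f and offset = 0.
    bool IsValidSoftmaxUint8OutputQuantization(const SketchQuantizationInfo& info)
    {
        return !info.scale().empty()  && info.scale()[0]  == (1.0f / 256.0f)
            && !info.offset().empty() && info.offset()[0] == 0;
    }
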
diff --git a/scripts/get_compute_library.sh b/scripts/get_compute_library.sh
index ec84209ec4..316e3000d5 100755
--- a/scripts/get_compute_library.sh
+++ b/scripts/get_compute_library.sh
@@ -10,7 +10,7 @@ CMD=$( basename $0 )
#DEFAULT_CLFRAMEWORKREVISION="branches/arm_compute_19_05" # Release 19.05
#
# For pinning to a revision use this:
-DEFAULT_CLFRAMEWORKREVISION="3689fcd5915cd902cb4ea5f618f2a6e42f6dc4a1"
+DEFAULT_CLFRAMEWORKREVISION="7bb56c6337997281df10fa28ad7924c921b920eb"
usage() {
echo "Usage: $CMD (Use the default clframework SHA)"
diff --git a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
index 84d735cdf7..ce2a9e6356 100644
--- a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
@@ -25,10 +25,9 @@ ClSoftmaxUint8Workload::ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& des
const auto outputQuantization = output.info()->quantization_info();
- if (((!outputQuantization.scale.empty()) && (outputQuantization.scale[0] != (1.0f / 256.0f))) ||
- ((!outputQuantization.offset.empty()) && (outputQuantization.offset[0] != 0)) ||
- (outputQuantization.scale.empty()) ||
- (outputQuantization.offset.empty()))
+ if ((!outputQuantization.scale().empty() && outputQuantization.scale()[0] != (1.0f / 256.0f)) ||
+ (!outputQuantization.offset().empty() && outputQuantization.offset()[0] != 0) ||
+ outputQuantization.scale().empty() || outputQuantization.offset().empty())
{
throw InvalidArgumentException(
"Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
index d1e49d954c..363c1502f5 100644
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
@@ -25,10 +25,9 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor&
const auto outputQuantization = output.info()->quantization_info();
- if (((!outputQuantization.scale.empty()) && (outputQuantization.scale[0] != (1.0f / 256.0f))) ||
- ((!outputQuantization.offset.empty()) && (outputQuantization.offset[0] != 0)) ||
- (outputQuantization.scale.empty()) ||
- (outputQuantization.offset.empty()))
+ if ((!outputQuantization.scale().empty() && outputQuantization.scale()[0] != (1.0f / 256.0f)) ||
+ (!outputQuantization.offset().empty() && outputQuantization.offset()[0] != 0) ||
+ outputQuantization.scale().empty() || outputQuantization.offset().empty())
{
throw InvalidArgumentException(
"Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");