diff options
-rwxr-xr-x | scripts/get_compute_library.sh | 2 |
-rw-r--r-- | src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp | 7 |
-rw-r--r-- | src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp | 7 |
3 files changed, 7 insertions, 9 deletions
diff --git a/scripts/get_compute_library.sh b/scripts/get_compute_library.sh index ec84209ec4..316e3000d5 100755 --- a/scripts/get_compute_library.sh +++ b/scripts/get_compute_library.sh @@ -10,7 +10,7 @@ CMD=$( basename $0 ) #DEFAULT_CLFRAMEWORKREVISION="branches/arm_compute_19_05" # Release 19.05 # # For pinning to a revision use this: -DEFAULT_CLFRAMEWORKREVISION="3689fcd5915cd902cb4ea5f618f2a6e42f6dc4a1" +DEFAULT_CLFRAMEWORKREVISION="7bb56c6337997281df10fa28ad7924c921b920eb" usage() { echo "Usage: $CMD (Use the default clframework SHA)" diff --git a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp index 84d735cdf7..ce2a9e6356 100644 --- a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp +++ b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp @@ -25,10 +25,9 @@ ClSoftmaxUint8Workload::ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& des const auto outputQuantization = output.info()->quantization_info(); - if (((!outputQuantization.scale.empty()) && (outputQuantization.scale[0] != (1.0f / 256.0f))) || - ((!outputQuantization.offset.empty()) && (outputQuantization.offset[0] != 0)) || - (outputQuantization.scale.empty()) || - (outputQuantization.offset.empty())) + if ((!outputQuantization.scale().empty() && outputQuantization.scale()[0] != (1.0f / 256.0f)) || (!outputQuantization.offset().empty() && outputQuantization.offset()[0] != 0) || outputQuantization.scale().empty() || outputQuantization.offset().empty()) { throw InvalidArgumentException( "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp index d1e49d954c..363c1502f5 100644 --- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp +++ b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp @@ -25,10 +25,9 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& const auto outputQuantization = output.info()->quantization_info(); - if (((!outputQuantization.scale.empty()) && (outputQuantization.scale[0] != (1.0f / 256.0f))) || - ((!outputQuantization.offset.empty()) && (outputQuantization.offset[0] != 0)) || - (outputQuantization.scale.empty()) || - (outputQuantization.offset.empty())) + if ((!outputQuantization.scale().empty() && outputQuantization.scale()[0] != (1.0f / 256.0f)) || (!outputQuantization.offset().empty() && outputQuantization.offset()[0] != 0) || outputQuantization.scale().empty() || outputQuantization.offset().empty()) { throw InvalidArgumentException( "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");