path: root/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
author    Ferran Balaguer <ferran.balaguer@arm.com>  2019-06-24 12:43:38 +0100
committer Ferran Balaguer Arm <ferran.balaguer@arm.com>  2019-06-26 15:22:31 +0000
commit    b2b5a26e7b7250532df189cd7cf7cf1d4b528a28 (patch)
tree      6cbb1d44f074455a58ab00c2f6edd5ab5c3e8ebe /src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
parent    5191033c76330509666e0993857b7286e2a325fc (diff)
download  armnn-b2b5a26e7b7250532df189cd7cf7cf1d4b528a28.tar.gz
Update the CL pin to the latest master
* Update SoftMaxUint8Workload for CL and NEON to deal with quantization parameters as vectors.
* Change Sigmoid Activation function QAsymm8 tests to use scale=1.f/256.f and offset=0 as quantization output parameters.

!android-nn-driver:1417

Signed-off-by: Ferran Balaguer <ferran.balaguer@arm.com>
Change-Id: Ief91f10193fbbbad0c0124ece41f0bf4e0dcd992
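For context (this is an illustration, not part of the commit): the required output quantization maps the softmax/sigmoid output range [0, 1) onto the full uint8 range. A minimal sketch of the affine quantization arithmetic, with hypothetical variable names:

    #include <cmath>
    #include <cstdio>

    // Affine quantization: q = round(value / scale) + offset (zero point).
    // With scale = 1/256 and offset = 0, the output range [0, 1) of
    // softmax/sigmoid maps exactly onto the uint8 range [0, 255].
    int main()
    {
        const float scale  = 1.0f / 256.0f;
        const int   offset = 0;

        for (float value : {0.0f, 0.5f, 0.99609375f}) // 0.99609375 == 255/256
        {
            int q = static_cast<int>(std::round(value / scale)) + offset;
            std::printf("%f -> %d\n", value, q); // prints 0, 128, 255
        }
        return 0;
    }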
Diffstat (limited to 'src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp')
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp  6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
index 7b2d29086e..d1e49d954c 100644
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
@@ -25,11 +25,15 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor&
const auto outputQuantization = output.info()->quantization_info();
- if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
+ if (((!outputQuantization.scale.empty()) && (outputQuantization.scale[0] != (1.0f / 256.0f))) ||
+ ((!outputQuantization.offset.empty()) && (outputQuantization.offset[0] != 0)) ||
+ (outputQuantization.scale.empty()) ||
+ (outputQuantization.offset.empty()))
{
throw InvalidArgumentException(
"Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
}
+
unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);