From b2b5a26e7b7250532df189cd7cf7cf1d4b528a28 Mon Sep 17 00:00:00 2001
From: Ferran Balaguer
Date: Mon, 24 Jun 2019 12:43:38 +0100
Subject: Update the CL pin to the latest master

* Update SoftMaxUint8Workload for CL and NEON to deal with quantization
  parameters as vectors.
* Change Sigmoid Activation function QAsymm8 tests to use scale=1.f/256.f
  and offset=0 as quantization output parameters.

!android-nn-driver:1417

Signed-off-by: Ferran Balaguer
Change-Id: Ief91f10193fbbbad0c0124ece41f0bf4e0dcd992
---
 .../backendsCommon/test/ActivationTestImpl.hpp  | 37 +++++++++++++++++-----
 .../cl/workloads/ClSoftmaxUint8Workload.cpp     |  5 ++-
 .../neon/workloads/NeonSoftmaxUint8Workload.cpp |  6 +++-
 3 files changed, 38 insertions(+), 10 deletions(-)

diff --git a/src/backends/backendsCommon/test/ActivationTestImpl.hpp b/src/backends/backendsCommon/test/ActivationTestImpl.hpp
index 9088d18858..282e6438d0 100644
--- a/src/backends/backendsCommon/test/ActivationTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ActivationTestImpl.hpp
@@ -392,9 +392,11 @@ LayerTestResult<T, 4> SimpleActivationTest(
     armnn::ActivationFunction activationFunction,
     float activationParameterA,
     float activationParameterB,
-    float qScale,
-    int32_t qOffset,
+    float scale,
+    int32_t offset,
     const std::vector<float>& inputData,
+    float outScale,
+    int32_t outOffset,
     const std::vector<float>& outputExpectedData)
 {
     constexpr static unsigned int inputWidth = 16u;
@@ -413,15 +415,15 @@
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
     {
-        inputTensorInfo.SetQuantizationScale(qScale);
-        inputTensorInfo.SetQuantizationOffset(qOffset);
-        outputTensorInfo.SetQuantizationScale(qScale);
-        outputTensorInfo.SetQuantizationOffset(qOffset);
+        inputTensorInfo.SetQuantizationScale(scale);
+        inputTensorInfo.SetQuantizationOffset(offset);
+        outputTensorInfo.SetQuantizationScale(outScale);
+        outputTensorInfo.SetQuantizationOffset(outOffset);
     }
 
     LayerTestResult<T, 4> result(inputTensorInfo);
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(scale, offset, inputData));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -448,7 +450,8 @@ LayerTestResult<T, 4> SimpleActivationTest(
     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
 
     // Calculated manually.
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(outScale, outOffset,
+                                                                                  outputExpectedData));
 
     return result;
 }
@@ -483,6 +486,8 @@ LayerTestResult<T, 4> SimpleSigmoidTestCommon(
         qScale,
         qOffset,
         inputData,
+        1.f / 256.f,
+        0,
         outputExpectedData);
 }
 
@@ -537,6 +542,8 @@ LayerTestResult<T, 4> ReLuTestCommon(
         qScale,
         qOffset,
         inputData,
+        qScale,
+        qOffset,
         outputExpectedData);
 }
 
@@ -594,6 +601,8 @@ LayerTestResult<T, 4> BoundedReLuTestCommon(
         qScale,
         qOffset,
         inputData,
+        qScale,
+        qOffset,
         outputExpectedData);
 }
 
@@ -636,6 +645,8 @@ LayerTestResult<T, 4> SoftReLuTestCommon(
         qScale,
         qOffset,
         inputData,
+        qScale,
+        qOffset,
         outputExpectedData);
 }
 
@@ -691,6 +702,8 @@ LayerTestResult<T, 4> LeakyReLuTestCommon(
         qScale,
         qOffset,
         inputData,
+        qScale,
+        qOffset,
         outputExpectedData);
 }
 
@@ -745,6 +758,8 @@ LayerTestResult<T, 4> AbsTestCommon(
         qScale,
         qOffset,
         inputData,
+        qScale,
+        qOffset,
         outputExpectedData);
 }
 
@@ -799,6 +814,8 @@ LayerTestResult<T, 4> SqrtTestCommon(
         qScale,
         qOffset,
         inputData,
+        qScale,
+        qOffset,
         outputExpectedData);
 }
 
@@ -853,6 +870,8 @@ LayerTestResult<T, 4> SquareTestCommon(
         qScale,
         qOffset,
         inputData,
+        qScale,
+        qOffset,
         outputExpectedData);
 }
 
@@ -909,6 +928,8 @@ LayerTestResult<T, 4> TanhTestCommon(
         qScale,
         qOffset,
         inputData,
+        qScale,
+        qOffset,
         outputExpectedData);
 }
 
diff --git a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
index 086f3751b4..84d735cdf7 100644
--- a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
@@ -25,7 +25,10 @@ ClSoftmaxUint8Workload::ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& des
 
     const auto outputQuantization = output.info()->quantization_info();
 
-    if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
+    if (((!outputQuantization.scale.empty()) && (outputQuantization.scale[0] != (1.0f / 256.0f))) ||
+        ((!outputQuantization.offset.empty()) && (outputQuantization.offset[0] != 0)) ||
+        (outputQuantization.scale.empty()) ||
+        (outputQuantization.offset.empty()))
     {
         throw InvalidArgumentException(
             "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
index 7b2d29086e..d1e49d954c 100644
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
@@ -25,11 +25,15 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor&
 
     const auto outputQuantization = output.info()->quantization_info();
 
-    if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
+    if (((!outputQuantization.scale.empty()) && (outputQuantization.scale[0] != (1.0f / 256.0f))) ||
+        ((!outputQuantization.offset.empty()) && (outputQuantization.offset[0] != 0)) ||
+        (outputQuantization.scale.empty()) ||
+        (outputQuantization.offset.empty()))
     {
         throw InvalidArgumentException(
             "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
     }
+
     unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
 
     auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
-- 
cgit v1.2.1
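
Background note: the Compute Library version pinned by this commit represents quantization scale and offset as vectors rather than scalars (hence the .empty() checks and [0] accesses above), so the workloads must both reject empty parameter vectors and validate the first element. The code below is a minimal, self-contained sketch of that validation logic, not Arm NN or Compute Library code; QuantizationInfoLike and ValidateSoftmaxOutputQuantization are illustrative stand-ins for arm_compute::QuantizationInfo and the inline checks in the two workload constructors.

    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Illustrative stand-in for a vector-based quantization descriptor
    // (the real type in the Compute Library is arm_compute::QuantizationInfo).
    struct QuantizationInfoLike
    {
        std::vector<float>   scale;
        std::vector<int32_t> offset;
    };

    // Mirrors the check added to ClSoftmaxUint8Workload and NeonSoftmaxUint8Workload:
    // a QAsymm8 softmax output must be quantized with scale = 1/256 and offset = 0,
    // and both parameter vectors must be non-empty.
    void ValidateSoftmaxOutputQuantization(const QuantizationInfoLike& quantInfo)
    {
        const bool scaleOk  = !quantInfo.scale.empty()  && quantInfo.scale[0]  == (1.0f / 256.0f);
        const bool offsetOk = !quantInfo.offset.empty() && quantInfo.offset[0] == 0;

        if (!scaleOk || !offsetOk)
        {
            throw std::invalid_argument(
                "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
        }
    }

The same constraint motivates the test change: SimpleSigmoidTestCommon now passes outScale = 1.f / 256.f and outOffset = 0 for the output tensor, so the QAsymm8 Sigmoid tests use exactly the output quantization that the updated workloads require.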