author      Ferran Balaguer <ferran.balaguer@arm.com>      2019-06-24 12:43:38 +0100
committer   Ferran Balaguer Arm <ferran.balaguer@arm.com>  2019-06-26 15:22:31 +0000
commit      b2b5a26e7b7250532df189cd7cf7cf1d4b528a28 (patch)
tree        6cbb1d44f074455a58ab00c2f6edd5ab5c3e8ebe
parent      5191033c76330509666e0993857b7286e2a325fc (diff)
download    armnn-b2b5a26e7b7250532df189cd7cf7cf1d4b528a28.tar.gz
Update the CL pin to the latest master
* Update SoftMaxUint8Workload for CL and NEON to deal with quantization
  parameters as vectors.
* Change Sigmoid Activation function QAsymm8 tests to use scale=1.f/256.f and
  offset=0 as quantization output parameters.

!android-nn-driver:1417

Signed-off-by: Ferran Balaguer <ferran.balaguer@arm.com>
Change-Id: Ief91f10193fbbbad0c0124ece41f0bf4e0dcd992
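For context on the new test output parameters: the sigmoid produces values in [0, 1), so a QAsymm8 output with scale = 1/256 and offset = 0 covers that range exactly with the full 8-bit code space. A minimal standalone sketch of the affine quantization the tests rely on (illustration only, not ArmNN's QuantizedVector implementation):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

// Affine quantization for QAsymm8: q = clamp(round(r / scale) + offset, 0, 255)
static uint8_t Quantize(float r, float scale, int32_t offset)
{
    const int32_t q = static_cast<int32_t>(std::lround(r / scale)) + offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}

int main()
{
    const float   scale  = 1.0f / 256.0f; // output scale used by the sigmoid QAsymm8 tests
    const int32_t offset = 0;             // output offset used by the sigmoid QAsymm8 tests

    // Sigmoid outputs in [0, 1) span the whole uint8 range with these parameters:
    // 0.0 -> 0, 0.5 -> 128, 255/256 -> 255
    for (float r : {0.0f, 0.5f, 255.0f / 256.0f})
    {
        std::cout << r << " -> " << static_cast<int>(Quantize(r, scale, offset)) << "\n";
    }
    return 0;
}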
-rwxr-xr-x  scripts/get_compute_library.sh                              2
-rw-r--r--  src/backends/backendsCommon/test/ActivationTestImpl.hpp    37
-rw-r--r--  src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp         5
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp     6
4 files changed, 39 insertions, 11 deletions
diff --git a/scripts/get_compute_library.sh b/scripts/get_compute_library.sh
index 391168a73b..ec84209ec4 100755
--- a/scripts/get_compute_library.sh
+++ b/scripts/get_compute_library.sh
@@ -10,7 +10,7 @@ CMD=$( basename $0 )
#DEFAULT_CLFRAMEWORKREVISION="branches/arm_compute_19_05" # Release 19.05
#
# For pinning to a revision use this:
-DEFAULT_CLFRAMEWORKREVISION="d7dd15c445397ab879439de6659859db09f4b752"
+DEFAULT_CLFRAMEWORKREVISION="3689fcd5915cd902cb4ea5f618f2a6e42f6dc4a1"
usage() {
echo "Usage: $CMD (Use the default clframework SHA)"
diff --git a/src/backends/backendsCommon/test/ActivationTestImpl.hpp b/src/backends/backendsCommon/test/ActivationTestImpl.hpp
index 9088d18858..282e6438d0 100644
--- a/src/backends/backendsCommon/test/ActivationTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ActivationTestImpl.hpp
@@ -392,9 +392,11 @@ LayerTestResult<T, 4> SimpleActivationTest(
armnn::ActivationFunction activationFunction,
float activationParameterA,
float activationParameterB,
- float qScale,
- int32_t qOffset,
+ float scale,
+ int32_t offset,
const std::vector<float>& inputData,
+ float outScale,
+ int32_t outOffset,
const std::vector<float>& outputExpectedData)
{
constexpr static unsigned int inputWidth = 16u;
@@ -413,15 +415,15 @@ LayerTestResult<T, 4> SimpleActivationTest(
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
{
- inputTensorInfo.SetQuantizationScale(qScale);
- inputTensorInfo.SetQuantizationOffset(qOffset);
- outputTensorInfo.SetQuantizationScale(qScale);
- outputTensorInfo.SetQuantizationOffset(qOffset);
+ inputTensorInfo.SetQuantizationScale(scale);
+ inputTensorInfo.SetQuantizationOffset(offset);
+ outputTensorInfo.SetQuantizationScale(outScale);
+ outputTensorInfo.SetQuantizationOffset(outOffset);
}
LayerTestResult<T, 4> result(inputTensorInfo);
- auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+ auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(scale, offset, inputData));
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -448,7 +450,8 @@ LayerTestResult<T, 4> SimpleActivationTest(
CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
// Calculated manually.
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));
+ result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(outScale, outOffset,
+ outputExpectedData));
return result;
}
@@ -483,6 +486,8 @@ LayerTestResult<T, 4> SimpleSigmoidTestCommon(
qScale,
qOffset,
inputData,
+ 1.f / 256.f,
+ 0,
outputExpectedData);
}
@@ -537,6 +542,8 @@ LayerTestResult<T, 4> ReLuTestCommon(
qScale,
qOffset,
inputData,
+ qScale,
+ qOffset,
outputExpectedData);
}
@@ -594,6 +601,8 @@ LayerTestResult<T, 4> BoundedReLuTestCommon(
qScale,
qOffset,
inputData,
+ qScale,
+ qOffset,
outputExpectedData);
}
@@ -636,6 +645,8 @@ LayerTestResult<T, 4> SoftReLuTestCommon(
qScale,
qOffset,
inputData,
+ qScale,
+ qOffset,
outputExpectedData);
}
@@ -691,6 +702,8 @@ LayerTestResult<T, 4> LeakyReLuTestCommon(
qScale,
qOffset,
inputData,
+ qScale,
+ qOffset,
outputExpectedData);
}
@@ -745,6 +758,8 @@ LayerTestResult<T, 4> AbsTestCommon(
qScale,
qOffset,
inputData,
+ qScale,
+ qOffset,
outputExpectedData);
}
@@ -799,6 +814,8 @@ LayerTestResult<T, 4> SqrtTestCommon(
qScale,
qOffset,
inputData,
+ qScale,
+ qOffset,
outputExpectedData);
}
@@ -853,6 +870,8 @@ LayerTestResult<T, 4> SquareTestCommon(
qScale,
qOffset,
inputData,
+ qScale,
+ qOffset,
outputExpectedData);
}
@@ -909,6 +928,8 @@ LayerTestResult<T, 4> TanhTestCommon(
qScale,
qOffset,
inputData,
+ qScale,
+ qOffset,
outputExpectedData);
}
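Taken together, the hunks above split SimpleActivationTest's quantization parameters into an input pair (scale, offset) and an output pair (outScale, outOffset); the sigmoid test is the only caller that passes different values for the two. A sketch of the resulting call from SimpleSigmoidTestCommon (the arguments before activationFunction and the template argument are assumptions reconstructed for illustration; the quantization arguments follow the hunks above):

// Sketch only: leading arguments and template parameter are assumed.
return SimpleActivationTest<T>(
    workloadFactory,
    memoryManager,
    armnn::ActivationFunction::Sigmoid,
    0.f,                   // activationParameterA (ignored by Sigmoid)
    0.f,                   // activationParameterB (ignored by Sigmoid)
    qScale,                // input quantization scale
    qOffset,               // input quantization offset
    inputData,
    1.f / 256.f,           // output quantization scale (new)
    0,                     // output quantization offset (new)
    outputExpectedData);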
diff --git a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
index 086f3751b4..84d735cdf7 100644
--- a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
@@ -25,7 +25,10 @@ ClSoftmaxUint8Workload::ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& des
const auto outputQuantization = output.info()->quantization_info();
- if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
+ if (((!outputQuantization.scale.empty()) && (outputQuantization.scale[0] != (1.0f / 256.0f))) ||
+ ((!outputQuantization.offset.empty()) && (outputQuantization.offset[0] != 0)) ||
+ (outputQuantization.scale.empty()) ||
+ (outputQuantization.offset.empty()))
{
throw InvalidArgumentException(
"Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
index 7b2d29086e..d1e49d954c 100644
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
@@ -25,11 +25,15 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor&
const auto outputQuantization = output.info()->quantization_info();
- if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
+ if (((!outputQuantization.scale.empty()) && (outputQuantization.scale[0] != (1.0f / 256.0f))) ||
+ ((!outputQuantization.offset.empty()) && (outputQuantization.offset[0] != 0)) ||
+ (outputQuantization.scale.empty()) ||
+ (outputQuantization.offset.empty()))
{
throw InvalidArgumentException(
"Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
}
+
unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
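The NEON workload applies exactly the same vector-aware validation as the CL one. Purely to spell out the shared constraint, a hypothetical helper (this function does not exist in ArmNN; the QuantizationInfo member layout follows the diffs above) would amount to:

// Hypothetical helper, not part of this change: both backends only accept a
// uint8 softmax output quantized with scale = 1/256 and offset = 0.
bool HasSupportedSoftmaxUint8OutputQuantization(const arm_compute::QuantizationInfo& qInfo)
{
    return !qInfo.scale.empty()  && qInfo.scale[0]  == (1.0f / 256.0f) &&
           !qInfo.offset.empty() && qInfo.offset[0] == 0;
}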