diff options
Diffstat (limited to 'src/backends')
-rw-r--r-- | src/backends/backendsCommon/test/ActivationTestImpl.hpp | 37 |
-rw-r--r-- | src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp | 5 |
-rw-r--r-- | src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp | 6 |
3 files changed, 38 insertions, 10 deletions
diff --git a/src/backends/backendsCommon/test/ActivationTestImpl.hpp b/src/backends/backendsCommon/test/ActivationTestImpl.hpp index 9088d18858..282e6438d0 100644 --- a/src/backends/backendsCommon/test/ActivationTestImpl.hpp +++ b/src/backends/backendsCommon/test/ActivationTestImpl.hpp @@ -392,9 +392,11 @@ LayerTestResult<T, 4> SimpleActivationTest( armnn::ActivationFunction activationFunction, float activationParameterA, float activationParameterB, - float qScale, - int32_t qOffset, + float scale, + int32_t offset, const std::vector<float>& inputData, + float outScale, + int32_t outOffset, const std::vector<float>& outputExpectedData) { constexpr static unsigned int inputWidth = 16u; @@ -413,15 +415,15 @@ LayerTestResult<T, 4> SimpleActivationTest( // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType<T>()) { - inputTensorInfo.SetQuantizationScale(qScale); - inputTensorInfo.SetQuantizationOffset(qOffset); - outputTensorInfo.SetQuantizationScale(qScale); - outputTensorInfo.SetQuantizationOffset(qOffset); + inputTensorInfo.SetQuantizationScale(scale); + inputTensorInfo.SetQuantizationOffset(offset); + outputTensorInfo.SetQuantizationScale(outScale); + outputTensorInfo.SetQuantizationOffset(outOffset); } LayerTestResult<T, 4> result(inputTensorInfo); - auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData)); + auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(scale, offset, inputData)); std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -448,7 +450,8 @@ LayerTestResult<T, 4> SimpleActivationTest( CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); // Calculated manually. 
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData)); + result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(outScale, outOffset, + outputExpectedData)); return result; } @@ -483,6 +486,8 @@ LayerTestResult<T, 4> SimpleSigmoidTestCommon( qScale, qOffset, inputData, + 1.f / 256.f, + 0, outputExpectedData); } @@ -537,6 +542,8 @@ LayerTestResult<T, 4> ReLuTestCommon( qScale, qOffset, inputData, + qScale, + qOffset, outputExpectedData); } @@ -594,6 +601,8 @@ LayerTestResult<T, 4> BoundedReLuTestCommon( qScale, qOffset, inputData, + qScale, + qOffset, outputExpectedData); } @@ -636,6 +645,8 @@ LayerTestResult<T, 4> SoftReLuTestCommon( qScale, qOffset, inputData, + qScale, + qOffset, outputExpectedData); } @@ -691,6 +702,8 @@ LayerTestResult<T, 4> LeakyReLuTestCommon( qScale, qOffset, inputData, + qScale, + qOffset, outputExpectedData); } @@ -745,6 +758,8 @@ LayerTestResult<T, 4> AbsTestCommon( qScale, qOffset, inputData, + qScale, + qOffset, outputExpectedData); } @@ -799,6 +814,8 @@ LayerTestResult<T, 4> SqrtTestCommon( qScale, qOffset, inputData, + qScale, + qOffset, outputExpectedData); } @@ -853,6 +870,8 @@ LayerTestResult<T, 4> SquareTestCommon( qScale, qOffset, inputData, + qScale, + qOffset, outputExpectedData); } @@ -909,6 +928,8 @@ LayerTestResult<T, 4> TanhTestCommon( qScale, qOffset, inputData, + qScale, + qOffset, outputExpectedData); } diff --git a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp index 086f3751b4..84d735cdf7 100644 --- a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp +++ b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp @@ -25,7 +25,10 @@ ClSoftmaxUint8Workload::ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& des const auto outputQuantization = output.info()->quantization_info(); - if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0)) + if 
(((!outputQuantization.scale.empty()) && (outputQuantization.scale[0] != (1.0f / 256.0f))) || + ((!outputQuantization.offset.empty()) && (outputQuantization.offset[0] != 0)) || + (outputQuantization.scale.empty()) || + (outputQuantization.offset.empty())) { throw InvalidArgumentException( "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported"); diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp index 7b2d29086e..d1e49d954c 100644 --- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp +++ b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp @@ -25,11 +25,15 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& const auto outputQuantization = output.info()->quantization_info(); - if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0)) + if (((!outputQuantization.scale.empty()) && (outputQuantization.scale[0] != (1.0f / 256.0f))) || + ((!outputQuantization.offset.empty()) && (outputQuantization.offset[0] != 0)) || + (outputQuantization.scale.empty()) || + (outputQuantization.offset.empty())) { throw InvalidArgumentException( "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported"); } + unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]); auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager); |