author     Francis Murtagh <francis.murtagh@arm.com>    2019-11-19 12:24:19 +0000
committer  Jim Flynn Arm <jim.flynn@arm.com>            2019-11-19 14:15:52 +0000
commit     52ec3463086a12282d8b833521e9e32d1055c6a6 (patch)
tree       4b312b58815d2552fb152753405aedfb238616f5 /src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
parent     47e863dce36d9722ece3c4ceeb59c548edbb8e4f (diff)
IVGCVSW-3697 Add check for ArgMinMax QAsymm8 to ClLayerSupport
* Enable Neon EndToEnd tests for ArgMinMax QAsymm8
* Enable Neon Layer tests for ArgMinMax QAsymm8
Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Change-Id: Ifa7463ded4397cacb82fb3667006f08ecbe3cd32
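The ClLayerSupport change named in the commit title is not part of the diff shown below, which is limited to ArgMinMaxTestImpl.cpp. As a rough, hypothetical illustration of what a backend data-type check of this kind amounts to, the standalone sketch below whitelists QAsymm8 for ArgMinMax; the DataType enum and the IsArgMinMaxInputSupported helper are invented for this example and are not Arm NN API.

```cpp
// Hypothetical sketch only; not the actual ClLayerSupport implementation.
#include <algorithm>
#include <array>
#include <iostream>

enum class DataType { Float16, Float32, QAsymm8, Signed32 };

// A backend-style whitelist check: ArgMinMax is reported as supported only
// when the input tensor's data type appears in the list below. Adding
// QAsymm8 to such a list is the substance of IVGCVSW-3697.
bool IsArgMinMaxInputSupported(DataType input)
{
    constexpr std::array<DataType, 3> supportedTypes =
    {
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymm8   // quantized 8-bit asymmetric input now accepted
    };
    return std::find(supportedTypes.begin(), supportedTypes.end(), input)
           != supportedTypes.end();
}

int main()
{
    std::cout << std::boolalpha
              << IsArgMinMaxInputSupported(DataType::QAsymm8) << "\n"   // true
              << IsArgMinMaxInputSupported(DataType::Float32) << "\n";  // true
}
```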
Diffstat (limited to 'src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp')
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp  30
1 file changed, 26 insertions(+), 4 deletions(-)
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
index 4475fb7abf..18c9e54f25 100644
--- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
@@ -70,7 +70,7 @@ LayerTestResult<int32_t, 3> ArgMaxSimpleTest(
 
     armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
 
-    if(armnn::IsQuantizedType<T>())
+    if (armnn::IsQuantizedType<T>())
     {
         inputTensorInfo.SetQuantizationScale(1.0f);
         inputTensorInfo.SetQuantizationOffset(0);
@@ -97,7 +97,7 @@ LayerTestResult<int32_t, 3> ArgMinSimpleTest(
 
     armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
 
-    if(armnn::IsQuantizedType<T>())
+    if (armnn::IsQuantizedType<T>())
     {
         inputTensorInfo.SetQuantizationScale(1.0f);
         inputTensorInfo.SetQuantizationOffset(0);
@@ -124,7 +124,7 @@ LayerTestResult<int32_t, 3> ArgMinChannelTest(
 
     armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
 
-    if(armnn::IsQuantizedType<T>())
+    if (armnn::IsQuantizedType<T>())
    {
         inputTensorInfo.SetQuantizationScale(1.0f);
         inputTensorInfo.SetQuantizationOffset(0);
@@ -159,7 +159,7 @@ LayerTestResult<int32_t, 3> ArgMaxChannelTest(
 
     armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
 
-    if(armnn::IsQuantizedType<T>())
+    if (armnn::IsQuantizedType<T>())
     {
         inputTensorInfo.SetQuantizationScale(1.0f);
         inputTensorInfo.SetQuantizationOffset(0);
@@ -195,6 +195,12 @@ LayerTestResult<int32_t, 3> ArgMaxHeightTest(
     armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
     armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
 
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.0f);
+        inputTensorInfo.SetQuantizationOffset(0);
+    }
+
     std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
                                      5.0f, 6.0f, 7.0f, 8.0f,
@@ -224,6 +230,12 @@ LayerTestResult<int32_t, 3> ArgMinWidthTest(
     armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
     armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
 
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.0f);
+        inputTensorInfo.SetQuantizationOffset(0);
+    }
+
     std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
                                      5.0f, 6.0f, 7.0f, 8.0f,
@@ -336,6 +348,11 @@ ArgMaxHeightTest<armnn::DataType::Signed32>(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
 template LayerTestResult<int32_t, 3>
+ArgMaxHeightTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
 ArgMinWidthTest<armnn::DataType::Float32>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -344,3 +361,8 @@ template LayerTestResult<int32_t, 3>
 ArgMinWidthTest<armnn::DataType::Signed32>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinWidthTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
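The hunks above repeatedly guard the tensor setup with armnn::IsQuantizedType<T>() and then set an identity quantization (scale 1.0, offset 0). That choice lets the same float reference values and the same Signed32 expected indices serve the new QuantisedAsymm8 variants. A minimal, self-contained sketch of why this works, using a simple affine quantizer that is not Arm NN's implementation:

```cpp
// Minimal sketch, not Arm NN code: with scale = 1.0 and offset = 0 the affine
// quantization q = round(x / scale) + offset maps the small integral test
// values onto themselves, so the argmax/argmin index is unchanged.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>

uint8_t Quantize(float x, float scale, int32_t offset)
{
    int32_t q = static_cast<int32_t>(std::round(x / scale)) + offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));  // clamp to uint8 range
}

int main()
{
    const std::vector<float> input = { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f };

    std::vector<uint8_t> quantized;
    for (float v : input)
    {
        quantized.push_back(Quantize(v, /*scale=*/1.0f, /*offset=*/0));
    }

    // Both argmax computations pick index 7, so the Signed32 expected outputs
    // in the test remain valid for the QAsymm8 variant.
    auto floatArgMax = std::distance(input.begin(),
                                     std::max_element(input.begin(), input.end()));
    auto quantArgMax = std::distance(quantized.begin(),
                                     std::max_element(quantized.begin(), quantized.end()));
    std::cout << floatArgMax << " " << quantArgMax << std::endl;  // prints: 7 7
}
```

The new QuantisedAsymm8 template instantiations at the end of the diff are then picked up by the backend test suites (the "Enable Neon Layer tests" bullet above); those registrations live in other files and are not shown in this diff.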