author     Matteo Martincigh <matteo.martincigh@arm.com>        2019-06-04 10:59:47 +0100
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2019-06-04 15:20:45 +0000
commit     f550713476f404a82e59bd68223a8a4955e753f2 (patch)
tree       762b3eae41d412c1da7b45c9ead1ca2547b66f12
parent     3122bd574a3d29774c535ca2136de361da626e88 (diff)
IVGCVSW-3213 Extend the Reference BatchNormalization workload to support the new QSymm16 type

 * Added QSymm16 to the range of supported types for batch normalization ref workloads
 * Added unit tests for QSymm16

Change-Id: I5b2fcfbd9cb5af149ebfe24e2d95f3affa2e3690
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp            |  3
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.cpp         | 86
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp         | 10
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp              |  5
-rw-r--r--  src/backends/reference/test/RefCreateWorkloadTests.cpp  | 12
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp           |  2
6 files changed, 114 insertions, 4 deletions
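The change itself is small: QuantisedSymm16 joins the set of data types the reference BatchNormalization path accepts, and matching unit tests are added. For orientation, QSymm16 is a symmetric 16-bit quantized format, i.e. a float maps to int16_t through a scale with no zero-point. The sketch below is illustrative only and uses hypothetical helper names, not ArmNN's own quantization utilities.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Illustrative only: symmetric 16-bit quantization uses a scale and a zero
// offset, so a float maps to int16_t as round(x / scale), clamped to the
// int16 range. Helper names here are hypothetical.
int16_t QuantizeSymm16(float value, float scale)
{
    float q = std::round(value / scale);
    q = std::min(q,  32767.0f);   // clamp to int16 max
    q = std::max(q, -32768.0f);   // clamp to int16 min
    return static_cast<int16_t>(q);
}

float DequantizeSymm16(int16_t value, float scale)
{
    return static_cast<float>(value) * scale; // inverse mapping
}
```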
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index a43619a466..a373f55d3e 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -692,7 +692,8 @@ void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInf
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
};
ValidateDataTypes(input, supportedTypes, "BatchNormalizationQueueDescriptor");
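ValidateDataTypes is the shared validation helper used throughout WorkloadData.cpp; its body is not part of this hunk, but the effect of the change is that QuantisedSymm16 now passes a membership test of roughly the following shape (a hedged sketch; everything beyond the identifiers visible in the hunk is assumed).

```cpp
#include <algorithm>
#include <string>
#include <vector>

#include <armnn/Exceptions.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

// Sketch only: a membership check over the supported-type list that throws
// when the tensor's type is not allowed. The real ValidateDataTypes helper
// may differ in detail.
void ValidateDataTypesSketch(const armnn::TensorInfo& info,
                             const std::vector<armnn::DataType>& supportedTypes,
                             const std::string& descName)
{
    if (std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType())
        == supportedTypes.end())
    {
        throw armnn::InvalidArgumentException(descName + ": data type not supported");
    }
}
```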
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index 5679fffe99..e96f7dc732 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -8259,6 +8259,92 @@ LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
1.f/20.f, 50, armnn::DataLayout::NHWC);
}
+LayerTestResult<int16_t, 4> BatchNormInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ // BatchSize: 1
+ // Channels: 2
+ // Height: 3
+ // Width: 2
+
+ const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
+ std::vector<float> inputValues
+ {
+ // Batch 0, Channel 0, Height (3) x Width (2)
+ 1.f, 4.f,
+ 4.f, 2.f,
+ 1.f, 6.f,
+
+ // Batch 0, Channel 1, Height (3) x Width (2)
+ 1.f, 1.f,
+ 4.f, 1.f,
+ -2.f, 4.f
+ };
+ std::vector<float> expectedOutputValues
+ {
+ // Batch 0, Channel 0, Height (3) x Width (2)
+ 1.f, 4.f,
+ 4.f, 2.f,
+ 1.f, 6.f,
+
+ // Batch 0, Channel 1, Height (3) x Width (2)
+ 3.f, 3.f,
+ 4.f, 3.f,
+ 2.f, 4.f
+ };
+
+ return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
+ workloadFactory, memoryManager,
+ inputOutputShape, inputValues, expectedOutputValues,
+ 1.f/20.f, 50, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ // BatchSize: 1
+ // Height: 3
+ // Width: 2
+ // Channels: 2
+
+ const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
+ std::vector<float> inputValues
+ {
+ // Batch 0, Height 0, Width (2) x Channel (2)
+ 1.f, 1.f,
+ 4.f, 1.f,
+
+ // Batch 0, Height 1, Width (2) x Channel (2)
+ 4.f, 4.f,
+ 2.f, 1.f,
+
+ // Batch 0, Height 2, Width (2) x Channel (2)
+ 1.f, -2.f,
+ 6.f, 4.f
+ };
+ std::vector<float> expectedOutputValues
+ {
+ // Batch 0, Height 0, Width (2) x Channel (2)
+ 1.f, 3.f,
+ 4.f, 3.f,
+
+ // Batch 0, Height 1, Width (2) x Channel (2)
+ 4.f, 4.f,
+ 2.f, 3.f,
+
+ // Batch 0, Height 2, Width (2) x Channel (2)
+ 1.f, 2.f,
+ 6.f, 4.f
+ };
+
+ return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>
+ (workloadFactory, memoryManager,
+ inputOutputShape, inputValues, expectedOutputValues,
+ 1.f/20.f, 50, armnn::DataLayout::NHWC);
+}
+
LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
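The float values above are reference data; the templated BatchNormTestImpl (not shown in this diff) quantizes them for the QuantisedSymm16 case using the scale and offset passed in (1/20 and 50). Assuming the usual affine mapping q = round(x / scale) + offset, the inputs land comfortably inside the int16 range, which the standalone check below illustrates; it is not part of the ArmNN test code.

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>

int main()
{
    // Assumed mapping: q = round(x / scale) + offset, with the scale (1/20)
    // and offset (50) that the tests above pass to BatchNormTestImpl.
    const float scale  = 1.0f / 20.0f;
    const int   offset = 50;

    auto quantize = [&](float x) {
        return static_cast<int16_t>(std::lround(x / scale) + offset);
    };

    assert(quantize( 1.0f) ==  70);
    assert(quantize( 4.0f) == 130);
    assert(quantize(-2.0f) ==  10);
    assert(quantize( 6.0f) == 170);
    return 0;
}
```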
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index df79e46dee..be686c141e 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -1043,6 +1043,14 @@ LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<int16_t, 4> BatchNormInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -2000,4 +2008,4 @@ LayerTestResult<T, 4> SimpleFloorTest(
CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
return ret;
-}
\ No newline at end of file
+}
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index aeff51d853..adc63e965b 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -288,10 +288,11 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
{
ignore_unused(descriptor);
- std::array<DataType, 2> supportedTypes =
+ std::array<DataType, 3> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
};
bool supported = true;
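Unlike the queue-descriptor validation above, which throws on a bad type, IsBatchNormalizationSupported folds the check into a boolean plus an optional reason string, as the `bool supported = true;` context line hints. The sketch below shows the shape of such a check against the enlarged supportedTypes array; it is illustrative only and does not reproduce the rule helpers the real file uses.

```cpp
#include <algorithm>
#include <array>
#include <string>

#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

// Sketch only: report support as a bool and record a reason on failure,
// rather than throwing.
bool IsTypeSupportedSketch(const armnn::TensorInfo& input,
                           armnn::Optional<std::string&> reasonIfUnsupported)
{
    const std::array<armnn::DataType, 3> supportedTypes =
    {
        armnn::DataType::Float32,
        armnn::DataType::QuantisedAsymm8,
        armnn::DataType::QuantisedSymm16
    };

    const bool supported = std::find(supportedTypes.begin(), supportedTypes.end(),
                                     input.GetDataType()) != supportedTypes.end();
    if (!supported && reasonIfUnsupported.has_value())
    {
        reasonIfUnsupported.value() = "Reference batch normalization: input type not supported";
    }
    return supported;
}
```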
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index a0c614564d..83e3f6c7bb 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -229,6 +229,18 @@ BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
(DataLayout::NHWC);
}
+BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16Workload)
+{
+ RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedSymm16>
+ (DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16WorkloadNhwc)
+{
+ RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedSymm16>
+ (DataLayout::NHWC);
+}
+
BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
{
Graph graph;
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 162027032e..afeadb9485 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -357,6 +357,8 @@ ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest)
ARMNN_AUTO_TEST_CASE(BatchNormNhwc, BatchNormNhwcTest)
ARMNN_AUTO_TEST_CASE(BatchNormUint8, BatchNormUint8Test)
ARMNN_AUTO_TEST_CASE(BatchNormUint8Nhwc, BatchNormUint8NhwcTest)
+ARMNN_AUTO_TEST_CASE(BatchNormInt16, BatchNormInt16Test)
+ARMNN_AUTO_TEST_CASE(BatchNormInt16Nhwc, BatchNormInt16NhwcTest)
// Resize Bilinear - NCHW
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest, armnn::DataLayout::NCHW)