author     Sadik Armagan <sadik.armagan@arm.com>      2020-04-17 12:45:14 +0100
committer  TeresaARM <teresa.charlinreyes@arm.com>    2020-04-27 14:49:42 +0000
commit     303980c502c721f13d65e7087be6c0758df65044 (patch)
tree       f1a9ab898b3121b988b8328161eddeb6a608e73f
parent     49c52a1e3be742cd7785ccc36c31cbbe495c4003 (diff)
download   armnn-303980c502c721f13d65e7087be6c0758df65044.tar.gz
IVGCVSW-4668 Add TENSOR_QUANT8_ASYMM_SIGNED data type support to CpuRef operators
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I094125ba80699cc3cf5226bda6662a54e6caa988
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                                      |  75
-rw-r--r--  src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp                       |  10
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp                 |  30
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp                    |  37
-rw-r--r--  src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp              |  25
-rw-r--r--  src/backends/backendsCommon/test/layerTests/NegTestImpl.cpp                       |  10
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp                   |  10
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp                    |  65
-rw-r--r--  src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp                     |  10
-rw-r--r--  src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp    |  27
-rw-r--r--  src/backends/cl/test/ClLayerTests.cpp                                             |  89
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp                                         |  76
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp                                        | 140
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp                                     |  12
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp                                     | 218
-rw-r--r--  src/backends/reference/workloads/Pad.cpp                                          |   6
-rw-r--r--  src/backends/reference/workloads/RefPadWorkload.cpp                               |   1
-rw-r--r--  src/backends/reference/workloads/RefPadWorkload.hpp                               |   9
-rw-r--r--  src/backends/reference/workloads/RefPermuteWorkload.cpp                           |   1
-rw-r--r--  src/backends/reference/workloads/RefPermuteWorkload.hpp                           |   9
-rw-r--r--  src/backends/reference/workloads/RefTransposeWorkload.cpp                         |   1
-rw-r--r--  src/backends/reference/workloads/RefTransposeWorkload.hpp                         |   1
22 files changed, 775 insertions, 87 deletions
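
The recurring edit in WorkloadData.cpp below (and in RefLayerSupport.cpp further down) is the same in every hunk: each validator keeps a container of the data types it accepts, and TENSOR_QUANT8_ASYMM_SIGNED support amounts to adding DataType::QAsymmS8 to that container, with the entries re-sorted alphabetically and fixed-size std::array declarations bumped by one element. A condensed sketch of the pattern, mirroring the hunks below rather than quoting any single one verbatim:

    // Sketch of one validator's supported-type list after the change
    // (illustrative; the exact list varies per workload descriptor).
    std::vector<DataType> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float16,
        DataType::Float32,
        DataType::QAsymmS8,   // new: signed asymmetric 8-bit quantised type
        DataType::QAsymmU8,
        DataType::QSymmS16
    };
    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);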
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 5fe056e669..d1249a492f 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -365,8 +365,8 @@ void ValidateWeightDataType(const TensorInfo& inputInfo,
ARMNN_NO_DEPRECATE_WARN_BEGIN
const std::vector<DataType> validTypes =
{
- DataType::QAsymmU8,
DataType::QAsymmS8,
+ DataType::QAsymmU8,
DataType::QSymmS8,
DataType::QuantizedSymm8PerAxis // deprecated
};
@@ -633,6 +633,7 @@ void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::BFloat16,
DataType::Float16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16,
DataType::Signed32
@@ -715,6 +716,7 @@ void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float16,
DataType::Boolean,
DataType::Signed32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -852,6 +854,7 @@ void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float16,
DataType::Boolean,
DataType::Signed32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -940,6 +943,7 @@ void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float16,
DataType::Boolean,
DataType::Signed32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1040,6 +1044,7 @@ void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
DataType::BFloat16,
DataType::Float16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1101,11 +1106,11 @@ void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
std::vector<DataType> supportedTypes =
{
DataType::BFloat16,
+ DataType::Float16,
DataType::Float32,
- DataType::QAsymmU8,
DataType::QAsymmS8,
- DataType::QSymmS16,
- DataType::Float16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
@@ -1138,6 +1143,7 @@ void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInf
DataType::BFloat16,
DataType::Float16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1209,12 +1215,12 @@ void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
std::vector<DataType> supportedTypes =
{
DataType::BFloat16,
+ DataType::Float16,
DataType::Float32,
DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16,
- DataType::QSymmS8,
- DataType::Float16
+ DataType::QSymmS8
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1298,11 +1304,11 @@ void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloa
std::vector<DataType> supportedTypes =
{
DataType::BFloat16,
+ DataType::Float16,
DataType::Float32,
- DataType::QAsymmU8,
DataType::QAsymmS8,
- DataType::QSymmS16,
- DataType::Float16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1383,6 +1389,7 @@ void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
DataType::BFloat16,
DataType::Float16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1535,6 +1542,7 @@ void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo)
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1587,11 +1595,11 @@ void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
- DataType::Signed32,
- DataType::QAsymmU8,
DataType::QAsymmS8,
+ DataType::QAsymmU8,
DataType::QSymmS8,
- DataType::QSymmS16
+ DataType::QSymmS16,
+ DataType::Signed32
};
ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
@@ -1615,10 +1623,10 @@ void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
- DataType::Signed32,
- DataType::QSymmS16,
DataType::QAsymmS8,
- DataType::QAsymmU8
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
+ DataType::Signed32
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1683,6 +1691,7 @@ void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
DataType::BFloat16,
DataType::Float16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1709,6 +1718,7 @@ void SpaceToDepthQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) con
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -2146,11 +2156,12 @@ void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
+ DataType::Float16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
- DataType::QSymmS16,
- DataType::Float16,
- DataType::BFloat16
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
@@ -2178,11 +2189,12 @@ void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) cons
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
+ DataType::Float16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
- DataType::QSymmS16,
- DataType::Float16,
- DataType::BFloat16
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
@@ -2213,10 +2225,10 @@ void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::BFloat16,
DataType::Float16,
DataType::Float32,
- DataType::Signed32,
DataType::QAsymmS8,
DataType::QAsymmU8,
- DataType::QSymmS16
+ DataType::QSymmS16,
+ DataType::Signed32
};
ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
@@ -2246,6 +2258,7 @@ void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -2340,6 +2353,7 @@ void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -2363,6 +2377,7 @@ void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) con
DataType::BFloat16,
DataType::Float16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -2420,9 +2435,10 @@ void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::BFloat16,
DataType::Float16,
DataType::Float32,
- DataType::Signed32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
- DataType::QSymmS16
+ DataType::QSymmS16,
+ DataType::Signed32
};
ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
@@ -2510,6 +2526,7 @@ void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::BFloat16,
DataType::Float16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -2539,6 +2556,7 @@ void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::BFloat16,
DataType::Float16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -2586,6 +2604,7 @@ void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadI
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -2678,6 +2697,7 @@ void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
DataType::BFloat16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -2722,6 +2742,7 @@ void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::BFloat16,
DataType::Float16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -2785,6 +2806,7 @@ void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloa
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -3010,6 +3032,7 @@ void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::BFloat16,
DataType::Float16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16,
DataType::Signed32
@@ -3092,6 +3115,7 @@ void DepthToSpaceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) con
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -3167,6 +3191,7 @@ void ElementwiseUnaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo)
DataType::BFloat16,
DataType::Float16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16,
DataType::Signed32
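
The test files that follow apply a matching two-part pattern: the shared layer-test implementations gain explicit template instantiations for armnn::DataType::QAsymmS8, and the backend test suites register those instantiations as new test cases. Both pieces below are drawn from the hunks that follow (Reshape used as the example); other operators repeat the same shape:

    // Explicit instantiation added in the shared test implementation
    template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
    SimpleReshapeTest<armnn::DataType::QAsymmS8>(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);

    // Registration added in a backend test suite
    ARMNN_AUTO_TEST_CASE(SimpleReshapeInt8, SimpleReshapeTest<DataType::QAsymmS8>)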
diff --git a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
index e6c6a96a9f..2cbc059044 100644
--- a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
@@ -114,6 +114,11 @@ Abs2dTest<armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
+Abs2dTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
Abs2dTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
@@ -134,6 +139,11 @@ Abs3dTest<armnn::DataType::Float16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
+Abs3dTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 3>
Abs3dTest<armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
index 20dcef5dd4..45ac05387a 100644
--- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
@@ -263,6 +263,11 @@ ArgMaxSimpleTest<armnn::DataType::Float32>(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
+ArgMaxSimpleTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
ArgMaxSimpleTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -283,6 +288,11 @@ ArgMinSimpleTest<armnn::DataType::Float32>(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
+ArgMinSimpleTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
ArgMinSimpleTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -303,6 +313,11 @@ ArgMinChannelTest<armnn::DataType::Float32>(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
+ArgMinChannelTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
ArgMinChannelTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -323,6 +338,11 @@ ArgMaxChannelTest<armnn::DataType::Float32>(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
+ArgMaxChannelTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
ArgMaxChannelTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -348,6 +368,11 @@ ArgMaxHeightTest<armnn::DataType::Signed32>(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
+ArgMaxHeightTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
ArgMaxHeightTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -363,6 +388,11 @@ ArgMinWidthTest<armnn::DataType::Signed32>(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
+ArgMinWidthTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
ArgMinWidthTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index c66027efdf..154ece2657 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -1019,6 +1019,7 @@ LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
switch (ArmnnType)
{
case armnn::DataType::QAsymmU8:
+ case armnn::DataType::QAsymmS8:
{
qScale = 0.1f;
qOffset = 128;
@@ -2520,6 +2521,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
int32_t qOffset;
switch (ArmnnType)
{
+ case armnn::DataType::QAsymmS8:
case armnn::DataType::QAsymmU8:
{
qScale = 0.1f;
@@ -3022,6 +3024,13 @@ Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float
bool,
armnn::DataLayout);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory&,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ bool,
+ armnn::DataLayout);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
@@ -3050,6 +3059,13 @@ Convolution2d2x3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BF
bool,
armnn::DataLayout);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory&,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ bool,
+ armnn::DataLayout);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
@@ -3078,6 +3094,13 @@ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, arm
bool biasEnabled,
const armnn::DataLayout layout);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory &workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ bool biasEnabled,
+ const armnn::DataLayout layout);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory &workloadFactory,
@@ -3106,6 +3129,13 @@ DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataTy
bool,
armnn::DataLayout);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory&,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ bool,
+ armnn::DataLayout);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
@@ -3134,6 +3164,13 @@ DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::Data
bool,
armnn::DataLayout);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory&,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ bool,
+ armnn::DataLayout);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
diff --git a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
index 4d4a6bc156..69994ddb03 100644
--- a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
@@ -307,6 +307,31 @@ DepthToSpaceTest4<armnn::DataType::QAsymmU8>(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
+// QuantisedAsymmS8
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+DepthToSpaceTest1<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+DepthToSpaceTest2<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+DepthToSpaceTest3<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+DepthToSpaceTest4<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::DataLayout dataLayout);
+
// QuantisedSymm16
template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
DepthToSpaceTest1<armnn::DataType::QSymmS16>(
diff --git a/src/backends/backendsCommon/test/layerTests/NegTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NegTestImpl.cpp
index aace926dcb..f2ed22238e 100644
--- a/src/backends/backendsCommon/test/layerTests/NegTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/NegTestImpl.cpp
@@ -133,6 +133,11 @@ Neg2dTest<armnn::DataType::Float16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
+Neg2dTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
Neg2dTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
@@ -153,6 +158,11 @@ Neg3dTest<armnn::DataType::Float16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
+Neg3dTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
Neg3dTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
index 5ed947d8c3..979d0a7f73 100644
--- a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
@@ -176,6 +176,11 @@ SimpleReshapeTest<armnn::DataType::Float32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+SimpleReshapeTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
SimpleReshapeTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
@@ -191,6 +196,11 @@ Reshape5dTest<armnn::DataType::Float32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 5>
+Reshape5dTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 5>
Reshape5dTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
index e95f18b7a5..f12f53c794 100644
--- a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
@@ -727,6 +727,71 @@ ResizeNearestNeighborMagTest<armnn::DataType::QAsymmU8>(
float outQuantScale,
int32_t outQuantOffset);
+// QAsymmS8
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+ResizeBilinearNopTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+SimpleResizeBilinearTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+ResizeBilinearSqMinTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+ResizeBilinearMinTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+ResizeBilinearMagTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+ResizeNearestNeighborNopTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+SimpleResizeNearestNeighborTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+ResizeNearestNeighborSqMinTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+ResizeNearestNeighborMinTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+ResizeNearestNeighborMagTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout dataLayout,
+ float inQuantScale,
+ int32_t inQuantOffset,
+ float outQuantScale,
+ int32_t outQuantOffset);
+
// QSymm16
template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
ResizeBilinearNopTest<armnn::DataType::QSymmS16>(
diff --git a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
index ca423835dc..367c82fb7c 100644
--- a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
@@ -133,6 +133,11 @@ Rsqrt2dTest<armnn::DataType::Float16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
+Rsqrt2dTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
Rsqrt2dTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
@@ -153,6 +158,11 @@ Rsqrt3dTest<armnn::DataType::Float16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
+Rsqrt3dTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
Rsqrt3dTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
index 07f52584ca..813c623cff 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
@@ -671,6 +671,13 @@ SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Floa
bool biasEnabled,
const armnn::DataLayout layout);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ bool biasEnabled,
+ const armnn::DataLayout layout);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
@@ -692,6 +699,13 @@ PaddedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Floa
bool biasEnabled,
const armnn::DataLayout layout);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ bool biasEnabled,
+ const armnn::DataLayout layout);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
@@ -713,6 +727,13 @@ StridedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Flo
bool biasEnabled,
const armnn::DataLayout layout);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+StridedTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ bool biasEnabled,
+ const armnn::DataLayout layout);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
StridedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
@@ -733,6 +754,12 @@ MultiChannelTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout layout);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 509da41f81..ce4496e71b 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -81,6 +81,14 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat321, BatchToSpaceNdNchwTest1<DataTyp
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat322, BatchToSpaceNdNchwTest2<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat323, BatchToSpaceNdNchwTest3<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt1, BatchToSpaceNdNhwcTest1<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt2, BatchToSpaceNdNhwcTest2<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt3, BatchToSpaceNdNhwcTest3<DataType::QAsymmS8>)
+
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt1, BatchToSpaceNdNchwTest1<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt2, BatchToSpaceNdNchwTest2<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt3, BatchToSpaceNdNchwTest3<DataType::QAsymmS8>)
+
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QAsymmU8>)
@@ -414,6 +422,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_2, DepthToSpaceTest2<DataType::Floa
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_1, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_2, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_3, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_4, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NCHW);
+
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
@@ -434,6 +447,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_2, DepthToSpaceTest2<DataType::Floa
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_1, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_2, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_3, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_4, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NHWC);
+
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
@@ -449,6 +467,7 @@ ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest<DataType::Float32>)
// Reshape
ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeInt8, SimpleReshapeTest<DataType::QAsymmS8>)
ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest<DataType::Float32>)
@@ -477,6 +496,10 @@ ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimplePermuteQASymmS8, SimplePermuteTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmS8>)
ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmU8>)
@@ -519,6 +542,14 @@ ARMNN_AUTO_TEST_CASE(MeanVts1Float32, MeanVts1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymmS8, MeanSimpleTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymmS8, MeanSimpleAxisTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymmS8, MeanKeepDimsTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymmS8, MeanMultipleDimsTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymmS8, MeanVts1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymmS8, MeanVts2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymmS8, MeanVts3Test<DataType::QAsymmS8>)
+
ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QAsymmU8>)
@@ -733,24 +764,36 @@ ARMNN_AUTO_TEST_CASE(StridedSlice2dReverseUint8, StridedSlice2dReverseUint8Test)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear,
SimpleResizeBilinearTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearInt8,
+ SimpleResizeBilinearTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
SimpleResizeBilinearTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNop,
ResizeBilinearNopTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopInt8,
+ ResizeBilinearNopTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
ResizeBilinearNopTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin,
ResizeBilinearSqMinTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinInt8,
+ ResizeBilinearSqMinTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
ResizeBilinearSqMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMin,
ResizeBilinearMinTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinInt8,
+ ResizeBilinearMinTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
ResizeBilinearMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
@@ -759,24 +802,36 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc,
ResizeBilinearNopTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopInt8Nhwc,
+ ResizeBilinearNopTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
ResizeBilinearNopTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
SimpleResizeBilinearTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearInt8Nhwc,
+ SimpleResizeBilinearTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
SimpleResizeBilinearTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
ResizeBilinearSqMinTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinInt8Nhwc,
+ ResizeBilinearSqMinTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
ResizeBilinearSqMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
ResizeBilinearMinTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinInt8Nhwc,
+ ResizeBilinearMinTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
ResizeBilinearMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
@@ -785,30 +840,45 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor,
SimpleResizeNearestNeighborTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborInt8,
+ SimpleResizeNearestNeighborTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
ResizeNearestNeighborNopTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopInt8,
+ ResizeNearestNeighborNopTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
ResizeNearestNeighborSqMinTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinInt8,
+ ResizeNearestNeighborSqMinTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
ResizeNearestNeighborMinTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinInt8,
+ ResizeNearestNeighborMinTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
ResizeNearestNeighborMagTest<DataType::Float32>,
DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagInt8,
+ ResizeNearestNeighborMagTest<DataType::QAsymmS8>,
+ DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
@@ -817,30 +887,45 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc,
ResizeNearestNeighborNopTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopInt8Nhwc,
+ ResizeNearestNeighborNopTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
SimpleResizeNearestNeighborTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborInt8Nhwc,
+ SimpleResizeNearestNeighborTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
ResizeNearestNeighborSqMinTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinInt8Nhwc,
+ ResizeNearestNeighborSqMinTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
ResizeNearestNeighborMinTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinInt8Nhwc,
+ ResizeNearestNeighborMinTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
ResizeNearestNeighborMagTest<DataType::Float32>,
DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagInt8Nhwc,
+ ResizeNearestNeighborMagTest<DataType::QAsymmS8>,
+ DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
@@ -867,6 +952,10 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeFloat32, SimpleTransposeTest<DataType::Float
ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet1Test, TransposeValueSet1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet2Test, TransposeValueSet2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet3Test, TransposeValueSet3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymmS8, SimpleTransposeTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet1Test, TransposeValueSet1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet3Test, TransposeValueSet3Test<DataType::QAsymmS8>)
ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymm8, SimpleTransposeTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet1Test, TransposeValueSet1Test<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmU8>)
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index f4df76c41e..f992bd61a1 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -36,6 +36,14 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat321, BatchToSpaceNdNchwTest1<DataTyp
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat322, BatchToSpaceNdNchwTest2<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat323, BatchToSpaceNdNchwTest3<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt1, BatchToSpaceNdNhwcTest1<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt2, BatchToSpaceNdNhwcTest2<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt3, BatchToSpaceNdNhwcTest3<DataType::QAsymmS8>)
+
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt1, BatchToSpaceNdNchwTest1<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt2, BatchToSpaceNdNchwTest2<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt3, BatchToSpaceNdNchwTest3<DataType::QAsymmS8>)
+
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QAsymmU8>)
@@ -74,6 +82,14 @@ ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Nhwc,
Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Int8,
+ Convolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+ false,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcInt8,
+ Convolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+ false,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8,
Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
@@ -90,6 +106,14 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Nhwc,
Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Int8,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+ false,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcInt8,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+ false,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8,
Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
@@ -108,6 +132,16 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Int8,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test
+ <DataType::QAsymmS8, DataType::Signed32>,
+ false,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcInt8,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test
+ <DataType::QAsymmS8, DataType::Signed32>,
+ false,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test
<DataType::QAsymmU8, DataType::Signed32>,
@@ -141,6 +175,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_2, DepthToSpaceTest2<DataType::Floa
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_1, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_2, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_3, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_4, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NCHW);
+
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
@@ -161,6 +200,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_2, DepthToSpaceTest2<DataType::Floa
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_1, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_2, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_3, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_4, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NHWC);
+
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
@@ -685,6 +729,7 @@ ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1dVectorUint8, NotEqualBroadcast1dVectorUi
// Reshape
ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeInt8, SimpleReshapeTest<armnn::DataType::QAsymmS8>)
ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<armnn::DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest<armnn::DataType::Float32>)
@@ -709,6 +754,10 @@ ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimplePermuteQASymmS8, SimplePermuteTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmS8>)
ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmU8>)
@@ -735,6 +784,14 @@ ARMNN_AUTO_TEST_CASE(MeanVts1Float32, MeanVts1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymmS8, MeanSimpleTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymmS8, MeanSimpleAxisTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymmS8, MeanKeepDimsTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymmS8, MeanMultipleDimsTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymmS8, MeanVts1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymmS8, MeanVts2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymmS8, MeanVts3Test<DataType::QAsymmS8>)
+
ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QAsymmU8>)
@@ -968,6 +1025,10 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeFloat32, SimpleTransposeTest<DataType::Float
ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet1Test, TransposeValueSet1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet2Test, TransposeValueSet2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet3Test, TransposeValueSet3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymms8, SimpleTransposeTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet1Test, TransposeValueSet1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet3Test, TransposeValueSet3Test<DataType::QAsymmS8>)
ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymm8, SimpleTransposeTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet1Test, TransposeValueSet1Test<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmU8>)
@@ -1037,6 +1098,14 @@ ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dFloatNhwc,
PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
true,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt8Nchw,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+ true,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt8Nhwc,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+ true,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nchw,
PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
@@ -1116,6 +1185,13 @@ ARMNN_AUTO_TEST_CASE(ArgMaxChannel, ArgMaxChannelTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(ArgMaxHeight, ArgMaxHeightTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(ArgMinWidth, ArgMinWidthTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(ArgMinQAsymmS8, ArgMinSimpleTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxQAsymmS8, ArgMaxSimpleTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannelQAsymmS8, ArgMinChannelTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannelQAsymmS8, ArgMaxChannelTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxHeightQAsymmS8, ArgMaxHeightTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(ArgMinWidthQAsymmS8, ArgMinWidthTest<DataType::QAsymmS8>)
+
ARMNN_AUTO_TEST_CASE(ArgMinQAsymm8, ArgMinSimpleTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(ArgMaxQAsymm8, ArgMaxSimpleTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(ArgMinChannelQAsymm8, ArgMinChannelTest<DataType::QAsymmU8>)
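Note on the test registrations above: each added line instantiates a shared test template for the new QAsymmS8 data type. A rough, self-contained sketch of that pattern (this is not the real ARMNN_AUTO_TEST_CASE macro or its Boost.Test plumbing; REGISTER_TEST_CASE below is a hypothetical stand-in):

#include <iostream>

enum class DataType { QAsymmS8, QAsymmU8, QSymmS16 };

// Shared test body, instantiated once per data type.
template <DataType DT>
void SimpleTransposeTest()
{
    std::cout << "transpose test, data type " << static_cast<int>(DT) << "\n";
}

// Hypothetical stand-in for ARMNN_AUTO_TEST_CASE: binds a test name to one instantiation.
#define REGISTER_TEST_CASE(Name, ...) void Name() { __VA_ARGS__(); }

REGISTER_TEST_CASE(SimpleTransposeQASymmS8, SimpleTransposeTest<DataType::QAsymmS8>)
REGISTER_TEST_CASE(SimpleTransposeQASymm8,  SimpleTransposeTest<DataType::QAsymmU8>)

int main()
{
    SimpleTransposeQASymmS8();
    SimpleTransposeQASymm8();
}

With this scheme, adding QAsymmS8 coverage is one registration line per existing test template, which is exactly the shape of the hunks above.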
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 25d639a38a..65ae14ff40 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -181,10 +181,11 @@ bool RefLayerSupport::IsArgMinMaxSupported(const armnn::TensorInfo &input, const
{
IgnoreUnused(descriptor);
- std::array<DataType, 5> supportedTypes =
+ std::array<DataType, 6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16,
DataType::Signed32
@@ -211,11 +212,12 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
{
IgnoreUnused(descriptor);
- std::array<DataType, 5> supportedTypes =
+ std::array<DataType, 6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -260,11 +262,12 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
std::string outputTensorStr = "output";
// Define supported types.
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -302,12 +305,13 @@ bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0,
Optional<std::string&> reasonIfUnsupported) const
{
IgnoreUnused(descriptor);
- std::array<DataType, 7> supportedInputTypes =
+ std::array<DataType, 8> supportedInputTypes =
{
DataType::Boolean,
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16,
DataType::Signed32
@@ -339,8 +343,8 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
- DataType::QAsymmU8,
DataType::QAsymmS8,
+ DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -366,11 +370,11 @@ bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
{
DataType::BFloat16,
DataType::Float32,
- DataType::Signed32,
- DataType::QAsymmU8,
DataType::QAsymmS8,
+ DataType::QAsymmU8,
DataType::QSymmS8,
- DataType::QSymmS16
+ DataType::QSymmS16,
+ DataType::Signed32
};
return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
@@ -462,8 +466,8 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
- DataType::QAsymmU8,
DataType::QAsymmS8,
+ DataType::QAsymmU8,
DataType::QSymmS8,
DataType::QSymmS16
};
@@ -495,9 +499,9 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
ARMNN_NO_DEPRECATE_WARN_BEGIN
std::array<DataType, 4> supportedWeightTypes =
{
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS8,
- DataType::QAsymmS8,
DataType::QuantizedSymm8PerAxis // deprecated
};
ARMNN_NO_DEPRECATE_WARN_END
@@ -543,8 +547,8 @@ bool RefLayerSupport::IsDebugSupported(const TensorInfo& input,
DataType::BFloat16,
DataType::Float16,
DataType::Float32,
- DataType::QAsymmU8,
DataType::QAsymmS8,
+ DataType::QAsymmU8,
DataType::QSymmS8,
DataType::QSymmS16,
DataType::Signed32
@@ -570,11 +574,12 @@ bool RefLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -598,6 +603,7 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported) const
{
+ IgnoreUnused(descriptor);
bool supported = true;
// Define supported types.
@@ -606,9 +612,9 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
- DataType::QSymmS8,
DataType::QAsymmS8,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
@@ -621,21 +627,22 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
"Reference DepthwiseConvolution2d: input and output types mismatched.");
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::array<DataType, 3> supportedWeightTypes =
- {
- DataType::QAsymmU8,
- DataType::QSymmS8,
- DataType::QuantizedSymm8PerAxis // deprecated
- };
- ARMNN_NO_DEPRECATE_WARN_END
-
const DataType inputType = input.GetDataType();
if (IsQuantized8BitType(inputType))
{
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ std::array<DataType, 4> supportedWeightTypes =
+ {
+ DataType::QAsymmS8,
+ DataType::QAsymmU8,
+ DataType::QSymmS8,
+ DataType::QuantizedSymm8PerAxis // deprecated
+ };
+ ARMNN_NO_DEPRECATE_WARN_END
supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
- "Reference convolution2d: weights type not supported for quantized input.");
+ "Reference DepthwiseConvolution2d: weights type not supported for "
+ "quantized input.");
}
else
{
@@ -658,7 +665,6 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
"Reference DepthwiseConvolution2d: biases is not a supported type.");
}
- IgnoreUnused(descriptor);
return supported;
@@ -716,10 +722,11 @@ bool RefLayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncod
bool supported = true;
- std::array<DataType,4> supportedInputTypes =
+ std::array<DataType,5> supportedInputTypes =
{
DataType::BFloat16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -750,10 +757,11 @@ bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,5> supportedTypes = {
+ std::array<DataType,6> supportedTypes = {
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -786,11 +794,12 @@ bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
{
IgnoreUnused(descriptor);
- std::array<DataType, 6> supportedTypes =
+ std::array<DataType, 7> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16,
DataType::Signed32
@@ -883,8 +892,8 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
- DataType::QAsymmU8,
DataType::QAsymmS8,
+ DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -913,8 +922,9 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
}
ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::array<DataType, 3> supportedWeightTypes =
+ std::array<DataType, 4> supportedWeightTypes =
{
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS8,
DataType::QuantizedSymm8PerAxis // deprecated
@@ -969,11 +979,12 @@ bool RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0,
armnn::Optional<std::string&> reasonIfUnsupported) const
{
bool supported = true;
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1050,11 +1061,12 @@ bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
{
IgnoreUnused(descriptor);
// Define supported types
- std::array<DataType, 5> supportedTypes =
+ std::array<DataType, 6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1261,11 +1273,12 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
std::string meanLayerStr = "Mean";
std::string outputTensorStr = "output";
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1328,11 +1341,12 @@ bool RefLayerSupport::IsMemCopySupported(const TensorInfo &input,
{
bool supported = true;
- std::array<DataType,6> supportedTypes =
+ std::array<DataType,7> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16,
DataType::Boolean
@@ -1357,10 +1371,11 @@ bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,5> supportedTypes = {
+ std::array<DataType,6> supportedTypes = {
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1397,8 +1412,8 @@ bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
- DataType::QAsymmU8,
DataType::QAsymmS8,
+ DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1431,11 +1446,12 @@ bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
IgnoreUnused(descriptor);
// Define supported types
- std::array<DataType, 5> supportedTypes =
+ std::array<DataType, 6> supportedTypes =
{
DataType::BFloat16,
DataType::Float16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1470,11 +1486,12 @@ bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
bool supported = true;
// Define supported output and inputs types.
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1500,11 +1517,12 @@ bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
bool supported = true;
// Define supported output and inputs types.
- std::array<DataType, 5> supportedTypes =
+ std::array<DataType, 6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1574,8 +1592,8 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
// Define supported output types.
std::array<DataType,4> supportedOutputTypes = {
- DataType::QAsymmU8,
DataType::QAsymmS8,
+ DataType::QAsymmU8,
DataType::QSymmS8,
DataType::QSymmS16
};
@@ -1616,11 +1634,12 @@ bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
bool supported = true;
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1649,8 +1668,8 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
- DataType::QAsymmU8,
DataType::QAsymmS8,
+ DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1684,10 +1703,11 @@ bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType, 4> supportedTypes =
+ std::array<DataType, 5> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1741,11 +1761,12 @@ bool RefLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
{
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1771,11 +1792,12 @@ bool RefLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1798,11 +1820,12 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
{
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1820,11 +1843,12 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
{
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1851,11 +1875,12 @@ bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inp
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1883,10 +1908,11 @@ bool RefLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1910,10 +1936,11 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,5> supportedTypes = {
+ std::array<DataType,6> supportedTypes = {
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1946,11 +1973,12 @@ bool RefLayerSupport::IsPreluSupported(const TensorInfo& input,
{
bool supported = true;
- std::array<DataType, 5> supportedTypes
+ std::array<DataType, 6> supportedTypes
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1983,12 +2011,14 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,7> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
@@ -2003,11 +2033,12 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
const DataType inputType = input.GetDataType();
- if (inputType == DataType::QAsymmU8)
+ if (IsQuantized8BitType(inputType))
{
ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::array<DataType, 3> supportedWeightTypes =
+ std::array<DataType, 4> supportedWeightTypes =
{
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS8,
DataType::QuantizedSymm8PerAxis //Deprecated
@@ -2052,11 +2083,12 @@ bool RefLayerSupport::IsTransposeSupported(const TensorInfo& input,
bool supported = true;
// Define supported output and inputs types.
- std::array<DataType, 5> supportedTypes =
+ std::array<DataType, 6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
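Each RefLayerSupport hunk above follows the same recipe: grow the supportedTypes array by one entry and let the type-membership rule accept QAsymmS8. A minimal, self-contained sketch of that check, assuming simplified TensorInfo and TypeAnyOf stand-ins rather than the real Arm NN types:

#include <algorithm>
#include <array>
#include <iostream>
#include <string>

enum class DataType { BFloat16, Float32, Float16, QAsymmS8, QAsymmU8, QSymmS16 };

struct TensorInfo { DataType dataType; };

// Hypothetical stand-in for CheckSupportRule(TypeAnyOf(...)): true when the tensor's
// data type appears in the supported list, otherwise records a reason string.
template <std::size_t N>
bool TypeAnyOf(const TensorInfo& info,
               const std::array<DataType, N>& supported,
               std::string& reasonIfUnsupported,
               const char* reason)
{
    const bool ok = std::any_of(supported.begin(), supported.end(),
                                [&](DataType t) { return t == info.dataType; });
    if (!ok) { reasonIfUnsupported = reason; }
    return ok;
}

bool IsTransposeSupported(const TensorInfo& input, std::string& reason)
{
    // Growing the array from 5 to 6 entries is what each hunk above does.
    std::array<DataType, 6> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8,   // newly supported signed asymmetric 8-bit type
        DataType::QAsymmU8,
        DataType::QSymmS16
    };
    return TypeAnyOf(input, supportedTypes, reason,
                     "Reference transpose: input type not supported.");
}

int main()
{
    std::string reason;
    std::cout << IsTransposeSupported({DataType::QAsymmS8}, reason) << "\n"; // prints 1
}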
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 5d3775a59d..4566fe5e40 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -468,6 +468,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescripto
{
return std::make_unique<RefPadBFloat16Workload>(descriptor, info);
}
+ else if (IsQAsymmS8(info))
+ {
+ return std::make_unique<RefPadQAsymmS8Workload>(descriptor, info);
+ }
return MakeWorkload<RefPadFloat32Workload, RefPadQAsymm8Workload>(descriptor, info);
}
@@ -482,6 +486,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePermute(const PermuteQueueD
{
return std::make_unique<RefPermuteBFloat16Workload>(descriptor, info);
}
+ else if (IsQAsymmS8(info))
+ {
+ return std::make_unique<RefPermuteQAsymmS8Workload>(descriptor, info);
+ }
return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
NullWorkload, NullWorkload, NullWorkload>(descriptor, info);
}
@@ -603,6 +611,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTranspose(const TransposeQu
{
return std::make_unique<RefTransposeBFloat16Workload>(descriptor, info);
}
+ else if (IsQAsymmS8(info))
+ {
+ return std::make_unique<RefTransposeQAsymmS8Workload>(descriptor, info);
+ }
return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload, RefTransposeQAsymm8Workload,
NullWorkload, NullWorkload, NullWorkload>(descriptor, info);
}
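The RefWorkloadFactory changes above add an explicit branch so Pad, Permute and Transpose pick the QAsymmS8 workload whenever the input tensor carries that data type. A hedged, self-contained sketch of that dispatch, using simplified types rather than the real IsQAsymmS8 and MakeWorkloadHelper machinery:

#include <iostream>
#include <memory>

enum class DataType { Float32, QAsymmS8, QAsymmU8 };

struct WorkloadInfo { DataType inputType; };

struct IWorkload
{
    virtual ~IWorkload() = default;
    virtual void Execute() const = 0;
};

template <DataType DT>
struct PadWorkload : IWorkload
{
    void Execute() const override
    {
        std::cout << "pad workload, data type " << static_cast<int>(DT) << "\n";
    }
};

// Analogue of the added factory branch: choose the QAsymmS8 specialisation
// when the input tensor uses that data type, otherwise fall through.
std::unique_ptr<IWorkload> CreatePad(const WorkloadInfo& info)
{
    if (info.inputType == DataType::QAsymmS8)
    {
        return std::make_unique<PadWorkload<DataType::QAsymmS8>>();
    }
    return std::make_unique<PadWorkload<DataType::Float32>>();
}

int main()
{
    CreatePad({DataType::QAsymmS8})->Execute();
}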
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index bcace79493..f50051aaac 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -86,6 +86,14 @@ ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Nhwc,
Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Int8,
+ Convolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+ false,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcInt8,
+ Convolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+ false,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8,
Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
@@ -119,6 +127,14 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Nhwc,
Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Int8,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+ false,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcInt8,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+ false,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8,
Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
@@ -152,6 +168,14 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Int8,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+ false,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcInt8,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+ false,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
@@ -217,6 +241,14 @@ ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcBFloat16,
DepthwiseConvolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
false,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Int8,
+ DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+ false,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcInt8,
+ DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+ false,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Uint8,
DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
@@ -250,6 +282,14 @@ ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcBFloat16,
DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
false,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Int8,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+ false,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcInt8,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+ false,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Uint8,
DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
@@ -743,6 +783,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear,
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearFloat16,
SimpleResizeBilinearTest<DataType::Float16>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearInt8,
+ SimpleResizeBilinearTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
SimpleResizeBilinearTest<DataType::QAsymmU8>,
DataLayout::NCHW)
@@ -755,6 +798,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearNop,
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopFloat16,
ResizeBilinearNopTest<DataType::Float16>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopInt8,
+ ResizeBilinearNopTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
ResizeBilinearNopTest<DataType::QAsymmU8>,
DataLayout::NCHW)
@@ -767,6 +813,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin,
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinFloat16,
ResizeBilinearSqMinTest<DataType::Float16>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinInt8,
+ ResizeBilinearSqMinTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
ResizeBilinearSqMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
@@ -779,6 +828,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMin,
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinFloat16,
ResizeBilinearMinTest<DataType::Float16>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinInt8,
+ ResizeBilinearMinTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
ResizeBilinearMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
@@ -791,6 +843,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMag,
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagFloat16,
ResizeBilinearMagTest<DataType::Float16>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagInt8,
+ ResizeBilinearMagTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8,
ResizeBilinearMagTest<DataType::QAsymmU8>,
DataLayout::NCHW)
@@ -805,6 +860,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc,
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwcFloat16,
ResizeBilinearNopTest<DataType::Float16>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopInt8Nhwc,
+ ResizeBilinearNopTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
ResizeBilinearNopTest<DataType::QAsymmU8>,
DataLayout::NHWC)
@@ -817,6 +875,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwcFloat16,
SimpleResizeBilinearTest<DataType::Float16>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearInt8Nhwc,
+ SimpleResizeBilinearTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
SimpleResizeBilinearTest<DataType::QAsymmU8>,
DataLayout::NHWC)
@@ -829,6 +890,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwcFloat16,
ResizeBilinearSqMinTest<DataType::Float16>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinInt8Nhwc,
+ ResizeBilinearSqMinTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
ResizeBilinearSqMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
@@ -841,6 +905,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwcFloat16,
ResizeBilinearMinTest<DataType::Float16>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinInt8Nhwc,
+ ResizeBilinearMinTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
ResizeBilinearMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
@@ -853,6 +920,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc,
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwcFloat16,
ResizeBilinearMagTest<DataType::Float16>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagInt8Nhwc,
+ ResizeBilinearMagTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc,
ResizeBilinearMagTest<DataType::QAsymmU8>,
DataLayout::NHWC)
@@ -864,6 +934,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint16Nhwc,
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor,
SimpleResizeNearestNeighborTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborInt8,
+ SimpleResizeNearestNeighborTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
DataLayout::NCHW)
@@ -873,6 +946,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16,
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
ResizeNearestNeighborNopTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopInt8,
+ ResizeNearestNeighborNopTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
DataLayout::NCHW)
@@ -882,6 +958,9 @@ ARMNN_AUTO_TEST_CASE(esizeNearestNeighborNopUint16,
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
ResizeNearestNeighborSqMinTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinInt8,
+ ResizeNearestNeighborSqMinTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
@@ -891,6 +970,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16,
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
ResizeNearestNeighborMinTest<DataType::Float32>,
DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinInt8,
+ ResizeNearestNeighborMinTest<DataType::QAsymmS8>,
+ DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
@@ -900,6 +982,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16,
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
ResizeNearestNeighborMagTest<DataType::Float32>,
DataLayout::NCHW, 0.10f, 50, 0.11f, 20)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagInt8,
+ ResizeNearestNeighborMagTest<DataType::QAsymmS8>,
+ DataLayout::NCHW, 0.10f, 50, 0.11f, 20)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
DataLayout::NCHW, 0.10f, 50, 0.11f, 20)
@@ -911,6 +996,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16,
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc,
ResizeNearestNeighborNopTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopInt8Nhwc,
+ ResizeNearestNeighborNopTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
DataLayout::NHWC)
@@ -920,6 +1008,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint16Nhwc,
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
SimpleResizeNearestNeighborTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborInt8Nhwc,
+ SimpleResizeNearestNeighborTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
DataLayout::NHWC)
@@ -929,6 +1020,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16Nhwc,
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
ResizeNearestNeighborSqMinTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinInt8Nhwc,
+ ResizeNearestNeighborSqMinTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
@@ -938,6 +1032,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16Nhwc,
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
ResizeNearestNeighborMinTest<DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinInt8Nhwc,
+ ResizeNearestNeighborMinTest<DataType::QAsymmS8>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
@@ -947,6 +1044,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16Nhwc,
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
ResizeNearestNeighborMagTest<DataType::Float32>,
DataLayout::NHWC, 0.10f, 50, 0.11f, 20)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagInt8Nhwc,
+ ResizeNearestNeighborMagTest<DataType::QAsymmS8>,
+ DataLayout::NHWC, 0.10f, 50, 0.11f, 20)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
DataLayout::NHWC, 0.10f, 50, 0.11f, 20)
@@ -1083,6 +1183,7 @@ ARMNN_AUTO_TEST_CASE(SimpleFloorQuantisedSymm16, SimpleFloorTest<DataType::QSymm
// Reshape
ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymmS8, SimpleReshapeTest<DataType::QAsymmS8>)
ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymm8, SimpleReshapeTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedSymm16, SimpleReshapeTest<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest<DataType::Float32>)
@@ -1094,6 +1195,8 @@ ARMNN_AUTO_TEST_CASE(RsqrtZero, RsqrtZeroTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(RsqrtNegative, RsqrtNegativeTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(Rsqrt2dFloat16, Rsqrt2dTest<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(Rsqrt3dFloat16, Rsqrt3dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedAsymmS8, Rsqrt2dTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedAsymmS8, Rsqrt3dTest<DataType::QAsymmS8>)
ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedAsymm8, Rsqrt2dTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedAsymm8, Rsqrt3dTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedSymm16, Rsqrt2dTest<DataType::QSymmS16>)
@@ -1108,6 +1211,10 @@ ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimplePermuteQASymmS8, SimplePermuteTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmS8>)
ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmU8>)
@@ -1168,6 +1275,14 @@ ARMNN_AUTO_TEST_CASE(MeanVts1Float32, MeanVts1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymmS8, MeanSimpleTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymmS8, MeanSimpleAxisTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymmS8, MeanKeepDimsTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymmS8, MeanMultipleDimsTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymmS8, MeanVts1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymmS8, MeanVts2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymmS8, MeanVts3Test<DataType::QAsymmS8>)
+
ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QAsymmU8>)
@@ -1201,6 +1316,11 @@ ARMNN_AUTO_TEST_CASE(ArgMaxChannelSigned32, ArgMaxChannelTest<DataType::Signed32
ARMNN_AUTO_TEST_CASE(ArgMaxHeightSigned32, ArgMaxHeightTest<DataType::Signed32>)
ARMNN_AUTO_TEST_CASE(ArgMinWidthSigned32, ArgMinWidthTest<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedAsymmS8, ArgMaxSimpleTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedAsymmS8, ArgMinSimpleTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedAsymmS8, ArgMinChannelTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannelQuantisedAsymmS8, ArgMaxChannelTest<DataType::QAsymmS8>)
+
ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedAsymm8, ArgMaxSimpleTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedAsymm8, ArgMinSimpleTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedAsymm8, ArgMinChannelTest<DataType::QAsymmU8>)
@@ -1269,6 +1389,14 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_5, BatchToSpaceNdNhwcTest5<DataTy
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_6, BatchToSpaceNdNhwcTest6<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_7, BatchToSpaceNdNhwcTest7<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt1, BatchToSpaceNdNhwcTest1<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt2, BatchToSpaceNdNhwcTest2<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt3, BatchToSpaceNdNhwcTest3<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt4, BatchToSpaceNdNhwcTest4<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt5, BatchToSpaceNdNhwcTest5<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt6, BatchToSpaceNdNhwcTest6<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt7, BatchToSpaceNdNhwcTest7<DataType::QAsymmS8>)
+
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QAsymmU8>)
@@ -1293,6 +1421,14 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_5, BatchToSpaceNdNchwTest5<DataTy
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_6, BatchToSpaceNdNchwTest6<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_7, BatchToSpaceNdNchwTest7<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt1, BatchToSpaceNdNchwTest1<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt2, BatchToSpaceNdNchwTest2<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt3, BatchToSpaceNdNchwTest3<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt4, BatchToSpaceNdNchwTest4<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt5, BatchToSpaceNdNchwTest5<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt6, BatchToSpaceNdNchwTest6<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt7, BatchToSpaceNdNchwTest7<DataType::QAsymmS8>)
+
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3<DataType::QAsymmU8>)
@@ -1320,6 +1456,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_2, DepthToSpaceTest2<DataType::Floa
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_1, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_2, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_3, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_4, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NCHW);
+
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
@@ -1340,6 +1481,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_2, DepthToSpaceTest2<DataType::Floa
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_1, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_2, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_3, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_4, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NHWC);
+
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
@@ -1468,6 +1614,8 @@ ARMNN_AUTO_TEST_CASE(Abs2dSigned32, Abs2dTest<DataType::Signed32>)
ARMNN_AUTO_TEST_CASE(Abs3dSigned32, Abs3dTest<DataType::Signed32>)
ARMNN_AUTO_TEST_CASE(AbsZeroSigned32, AbsZeroTest<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE(Abs2dQuantisedAsymmS8, Abs2dTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(Abs3dQuantisedAsymmS8, Abs3dTest<DataType::QAsymmS8>)
ARMNN_AUTO_TEST_CASE(Abs2dQuantisedAsymm8, Abs2dTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(Abs3dQuantisedAsymm8, Abs3dTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(Abs2dQuantisedSymm16, Abs2dTest<DataType::QSymmS16>)
@@ -1482,6 +1630,16 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsFloat)
{
DetectionPostProcessFastNmsFloatTest<RefWorkloadFactory>();
}
+BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsInt8)
+{
+ DetectionPostProcessRegularNmsQuantizedTest<
+ RefWorkloadFactory, DataType::QAsymmS8>();
+}
+BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt8)
+{
+    DetectionPostProcessFastNmsQuantizedTest<
+ RefWorkloadFactory, DataType::QAsymmS8>();
+}
BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsUint8)
{
DetectionPostProcessRegularNmsQuantizedTest<
@@ -1552,6 +1710,10 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeFloat32, SimpleTransposeTest<DataType::Float
ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet1Test, TransposeValueSet1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet2Test, TransposeValueSet2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet3Test, TransposeValueSet3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymmS8, SimpleTransposeTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet1Test, TransposeValueSet1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet3Test, TransposeValueSet3Test<DataType::QAsymmS8>)
ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymm8, SimpleTransposeTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet1Test, TransposeValueSet1Test<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmU8>)
@@ -1570,6 +1732,14 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNhwc,
SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
true,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dInt8Nchw,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+ true,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dInt8Nhwc,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+ true,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nchw,
SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
@@ -1595,6 +1765,14 @@ ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dFloatNhwc,
SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
true,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dInt8Nchw,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+ true,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dInt8Nhwc,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+ true,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nchw,
SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
@@ -1620,6 +1798,14 @@ ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dFloatNhwc,
PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
true,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dInt8Nchw,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+ true,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dInt8Nhwc,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+ true,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nchw,
PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
@@ -1645,6 +1831,14 @@ ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dFloatNhwc,
PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
true,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt8Nchw,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+ true,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt8Nhwc,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+ true,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nchw,
PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
@@ -1670,6 +1864,14 @@ ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dFloatNhwc,
StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
true,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dInt8Nchw,
+ StridedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+ true,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dInt8Nhwc,
+ StridedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+ true,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nchw,
StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
@@ -1695,6 +1897,14 @@ ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dFloatNhwc,
StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
true,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dInt8Nchw,
+ StridedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+ true,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dInt8Nhwc,
+ StridedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+ true,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nchw,
StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
@@ -1718,6 +1928,12 @@ ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dFloatNchw,
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dFloatNhwc,
MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dInt8Nchw,
+ MultiChannelTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dInt8Nhwc,
+ MultiChannelTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nchw,
MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
DataLayout::NCHW)
@@ -1754,6 +1970,8 @@ ARMNN_AUTO_TEST_CASE(NegZero, NegZeroTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(NegNegative, NegNegativeTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(Neg2dFloat16, Neg2dTest<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(Neg3dFloat16, Neg3dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(Neg2dQuantisedAsymmS8, Neg2dTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(Neg3dQuantisedAsymmS8, Neg3dTest<DataType::QAsymmS8>)
ARMNN_AUTO_TEST_CASE(Neg2dQuantisedAsymm8, Neg2dTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(Neg3dQuantisedAsymm8, Neg3dTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(Neg2dQuantisedSymm16, Neg2dTest<DataType::QSymmS16>)
diff --git a/src/backends/reference/workloads/Pad.cpp b/src/backends/reference/workloads/Pad.cpp
index ffdd469609..1b634145fc 100644
--- a/src/backends/reference/workloads/Pad.cpp
+++ b/src/backends/reference/workloads/Pad.cpp
@@ -177,6 +177,12 @@ template void Pad<uint8_t>(const TensorInfo& inputInfo,
const uint8_t* inputData,
uint8_t* outData,
const float padValue);
+template void Pad<int8_t>(const TensorInfo& inputInfo,
+ const TensorInfo& outputInfo,
+ std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
+ const int8_t* inputData,
+ int8_t* outData,
+ const float padValue);
template void Pad<int16_t>(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp
index 777682d70c..6f82d5ffdb 100644
--- a/src/backends/reference/workloads/RefPadWorkload.cpp
+++ b/src/backends/reference/workloads/RefPadWorkload.cpp
@@ -36,6 +36,7 @@ void RefPadWorkload<DataType>::Execute() const
template class RefPadWorkload<DataType::BFloat16>;
template class RefPadWorkload<DataType::Float32>;
template class RefPadWorkload<DataType::Float16>;
+template class RefPadWorkload<DataType::QAsymmS8>;
template class RefPadWorkload<DataType::QAsymmU8>;
template class RefPadWorkload<DataType::QSymmS16>;
diff --git a/src/backends/reference/workloads/RefPadWorkload.hpp b/src/backends/reference/workloads/RefPadWorkload.hpp
index 5134ac8bff..74dcab1967 100644
--- a/src/backends/reference/workloads/RefPadWorkload.hpp
+++ b/src/backends/reference/workloads/RefPadWorkload.hpp
@@ -31,9 +31,10 @@ public:
};
using RefPadBFloat16Workload = RefPadWorkload<DataType::BFloat16>;
-using RefPadFloat32Workload = RefPadWorkload<DataType::Float32>;
-using RefPadFloat16Workload = RefPadWorkload<DataType::Float16>;
-using RefPadQAsymm8Workload = RefPadWorkload<DataType::QAsymmU8>;
-using RefPadQSymm16Workload = RefPadWorkload<DataType::QSymmS16>;
+using RefPadFloat32Workload = RefPadWorkload<DataType::Float32>;
+using RefPadFloat16Workload = RefPadWorkload<DataType::Float16>;
+using RefPadQAsymmS8Workload = RefPadWorkload<DataType::QAsymmS8>;
+using RefPadQAsymm8Workload = RefPadWorkload<DataType::QAsymmU8>;
+using RefPadQSymm16Workload = RefPadWorkload<DataType::QSymmS16>;
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.cpp b/src/backends/reference/workloads/RefPermuteWorkload.cpp
index 5751ed80a3..75e9d0acf0 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.cpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.cpp
@@ -31,6 +31,7 @@ void RefPermuteWorkload<DataType>::Execute() const
template class RefPermuteWorkload<DataType::BFloat16>;
template class RefPermuteWorkload<DataType::Float16>;
template class RefPermuteWorkload<DataType::Float32>;
+template class RefPermuteWorkload<DataType::QAsymmS8>;
template class RefPermuteWorkload<DataType::QAsymmU8>;
template class RefPermuteWorkload<DataType::QSymmS16>;
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.hpp b/src/backends/reference/workloads/RefPermuteWorkload.hpp
index a8d308e47c..b9f259a8f8 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.hpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.hpp
@@ -28,9 +28,10 @@ public:
};
using RefPermuteBFloat16Workload = RefPermuteWorkload<DataType::BFloat16>;
-using RefPermuteFloat16Workload = RefPermuteWorkload<DataType::Float16>;
-using RefPermuteFloat32Workload = RefPermuteWorkload<DataType::Float32>;
-using RefPermuteQAsymm8Workload = RefPermuteWorkload<DataType::QAsymmU8>;
-using RefPermuteQSymm16Workload = RefPermuteWorkload<DataType::QSymmS16>;
+using RefPermuteFloat16Workload = RefPermuteWorkload<DataType::Float16>;
+using RefPermuteFloat32Workload = RefPermuteWorkload<DataType::Float32>;
+using RefPermuteQAsymmS8Workload = RefPermuteWorkload<DataType::QAsymmS8>;
+using RefPermuteQAsymm8Workload = RefPermuteWorkload<DataType::QAsymmU8>;
+using RefPermuteQSymm16Workload = RefPermuteWorkload<DataType::QSymmS16>;
} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefTransposeWorkload.cpp b/src/backends/reference/workloads/RefTransposeWorkload.cpp
index 242668b6b1..4e027bee2e 100644
--- a/src/backends/reference/workloads/RefTransposeWorkload.cpp
+++ b/src/backends/reference/workloads/RefTransposeWorkload.cpp
@@ -30,6 +30,7 @@ void RefTransposeWorkload<DataType>::Execute() const
template class RefTransposeWorkload<DataType::BFloat16>;
template class RefTransposeWorkload<DataType::Float16>;
template class RefTransposeWorkload<DataType::Float32>;
+template class RefTransposeWorkload<DataType::QAsymmS8>;
template class RefTransposeWorkload<DataType::QAsymmU8>;
template class RefTransposeWorkload<DataType::QSymmS16>;
diff --git a/src/backends/reference/workloads/RefTransposeWorkload.hpp b/src/backends/reference/workloads/RefTransposeWorkload.hpp
index dcfe618b75..387572aab9 100644
--- a/src/backends/reference/workloads/RefTransposeWorkload.hpp
+++ b/src/backends/reference/workloads/RefTransposeWorkload.hpp
@@ -30,6 +30,7 @@ public:
using RefTransposeBFloat16Workload = RefTransposeWorkload<DataType::BFloat16>;
using RefTransposeFloat16Workload = RefTransposeWorkload<DataType::Float16>;
using RefTransposeFloat32Workload = RefTransposeWorkload<DataType::Float32>;
+using RefTransposeQAsymmS8Workload = RefTransposeWorkload<DataType::QAsymmS8>;
using RefTransposeQAsymm8Workload = RefTransposeWorkload<DataType::QAsymmU8>;
using RefTransposeQSymm16Workload = RefTransposeWorkload<DataType::QSymmS16>;
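The workload .cpp/.hpp pairs above rely on explicit template instantiation plus a type alias per data type, so supporting QAsymmS8 is one new "template class" line in the .cpp and one new "using" alias in the .hpp. A compact sketch of that idiom (the names here are illustrative, not the actual Arm NN classes):

#include <iostream>

enum class DataType { Float32, QAsymmS8, QAsymmU8 };

// One templated workload definition...
template <DataType DT>
class RefSketchWorkload
{
public:
    void Execute() const
    {
        std::cout << "executing for data type " << static_cast<int>(DT) << "\n";
    }
};

// ...explicitly instantiated per data type (normally done in the .cpp)...
template class RefSketchWorkload<DataType::QAsymmS8>;
template class RefSketchWorkload<DataType::QAsymmU8>;

// ...and given readable aliases (normally declared in the .hpp).
using RefSketchQAsymmS8Workload = RefSketchWorkload<DataType::QAsymmS8>;
using RefSketchQAsymm8Workload  = RefSketchWorkload<DataType::QAsymmU8>;

int main()
{
    RefSketchQAsymmS8Workload{}.Execute();
}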