From 303980c502c721f13d65e7087be6c0758df65044 Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Fri, 17 Apr 2020 12:45:14 +0100
Subject: IVGCVSW-4668 Add TENSOR_QUANT8_ASYMM_SIGNED data type support to CpuRef operators

Signed-off-by: Teresa Charlin
Signed-off-by: Sadik Armagan
Change-Id: I094125ba80699cc3cf5226bda6662a54e6caa988
---
 src/backends/backendsCommon/WorkloadData.cpp | 75 ++++---
 .../backendsCommon/test/layerTests/AbsTestImpl.cpp | 10 +
 .../test/layerTests/ArgMinMaxTestImpl.cpp | 30 +++
 .../test/layerTests/Conv2dTestImpl.cpp | 37 ++++
 .../test/layerTests/DepthToSpaceTestImpl.cpp | 25 +++
 .../backendsCommon/test/layerTests/NegTestImpl.cpp | 10 +
 .../test/layerTests/ReshapeTestImpl.cpp | 10 +
 .../test/layerTests/ResizeTestImpl.cpp | 65 ++++++
 .../test/layerTests/RsqrtTestImpl.cpp | 10 +
 .../layerTests/TransposeConvolution2dTestImpl.cpp | 27 +++
 src/backends/cl/test/ClLayerTests.cpp | 89 +++++++++
 src/backends/neon/test/NeonLayerTests.cpp | 76 +++++++
 src/backends/reference/RefLayerSupport.cpp | 140 ++++++++--
 src/backends/reference/RefWorkloadFactory.cpp | 12 ++
 src/backends/reference/test/RefLayerTests.cpp | 218 +++++++++++++++++++++
 src/backends/reference/workloads/Pad.cpp | 6 +
 .../reference/workloads/RefPadWorkload.cpp | 1 +
 .../reference/workloads/RefPadWorkload.hpp | 9 +-
 .../reference/workloads/RefPermuteWorkload.cpp | 1 +
 .../reference/workloads/RefPermuteWorkload.hpp | 9 +-
 .../reference/workloads/RefTransposeWorkload.cpp | 1 +
 .../reference/workloads/RefTransposeWorkload.hpp | 1 +
 22 files changed, 775 insertions(+), 87 deletions(-)

diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 5fe056e669..d1249a492f 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -365,8 +365,8 @@ void ValidateWeightDataType(const TensorInfo& inputInfo,
     ARMNN_NO_DEPRECATE_WARN_BEGIN
     const std::vector validTypes =
     {
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS8,
         DataType::QuantizedSymm8PerAxis // deprecated
     };
@@ -633,6 +633,7 @@ void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
         DataType::BFloat16,
         DataType::Float16,
         DataType::Float32,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16,
         DataType::Signed32
@@ -715,6 +716,7 @@ void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
         DataType::Float16,
         DataType::Boolean,
         DataType::Signed32,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -852,6 +854,7 @@ void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
         DataType::Float16,
         DataType::Boolean,
         DataType::Signed32,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -940,6 +943,7 @@ void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
         DataType::Float16,
         DataType::Boolean,
         DataType::Signed32,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1040,6 +1044,7 @@ void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
         DataType::BFloat16,
         DataType::Float16,
         DataType::Float32,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1101,11 +1106,11 @@ void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
     std::vector supportedTypes =
     {
         DataType::BFloat16,
+        DataType::Float16,
         DataType::Float32,
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
-        DataType::QSymmS16,
-        DataType::Float16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
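Every WorkloadData.cpp hunk in this patch follows one pattern: each queue descriptor's Validate() keeps an explicit allow-list of tensor data types, and the patch inserts DataType::QAsymmS8 into that list (re-ordering a few of the lists into a consistent order along the way). The stand-alone sketch below mimics that check outside ArmNN; the enum and the ValidateDataType helper are simplified stand-ins for the library's own DataType and ValidateDataTypes, not the real implementation.

    // Minimal self-contained sketch (not ArmNN code) of the allow-list check
    // that WorkloadData.cpp performs for every workload descriptor.
    #include <algorithm>
    #include <stdexcept>
    #include <string>
    #include <vector>

    enum class DataType { BFloat16, Float16, Float32, QAsymmS8, QAsymmU8, QSymmS16, Signed32 };

    void ValidateDataType(DataType actual,
                          const std::vector<DataType>& supportedTypes,
                          const std::string& descriptorName)
    {
        if (std::find(supportedTypes.begin(), supportedTypes.end(), actual) == supportedTypes.end())
        {
            throw std::invalid_argument(descriptorName + ": tensor data type is not supported.");
        }
    }

    int main()
    {
        const std::vector<DataType> supportedTypes =
        {
            DataType::BFloat16,
            DataType::Float16,
            DataType::Float32,
            DataType::QAsymmS8, // the type this patch enables
            DataType::QAsymmU8,
            DataType::QSymmS16
        };

        // Passes after the patch; before it, QAsymmS8 was missing from the list.
        ValidateDataType(DataType::QAsymmS8, supportedTypes, "MultiplicationQueueDescriptor");
        return 0;
    }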
ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName); @@ -1138,6 +1143,7 @@ void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInf DataType::BFloat16, DataType::Float16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1209,12 +1215,12 @@ void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co std::vector supportedTypes = { DataType::BFloat16, + DataType::Float16, DataType::Float32, DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16, - DataType::QSymmS8, - DataType::Float16 + DataType::QSymmS8 }; ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName); @@ -1298,11 +1304,11 @@ void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloa std::vector supportedTypes = { DataType::BFloat16, + DataType::Float16, DataType::Float32, - DataType::QAsymmU8, DataType::QAsymmS8, - DataType::QSymmS16, - DataType::Float16 + DataType::QAsymmU8, + DataType::QSymmS16 }; ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName); @@ -1383,6 +1389,7 @@ void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c DataType::BFloat16, DataType::Float16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1535,6 +1542,7 @@ void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1587,11 +1595,11 @@ void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const DataType::BFloat16, DataType::Float32, DataType::Float16, - DataType::Signed32, - DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS8, - DataType::QSymmS16 + DataType::QSymmS16, + DataType::Signed32 }; ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName); @@ -1615,10 +1623,10 @@ void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const DataType::BFloat16, DataType::Float32, DataType::Float16, - DataType::Signed32, - DataType::QSymmS16, DataType::QAsymmS8, - DataType::QAsymmU8 + DataType::QAsymmU8, + DataType::QSymmS16, + DataType::Signed32 }; ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName); @@ -1683,6 +1691,7 @@ void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c DataType::BFloat16, DataType::Float16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1709,6 +1718,7 @@ void SpaceToDepthQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) con DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -2146,11 +2156,12 @@ void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const std::vector supportedTypes = { + DataType::BFloat16, + DataType::Float16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, - DataType::QSymmS16, - DataType::Float16, - DataType::BFloat16 + DataType::QSymmS16 }; ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName); @@ -2178,11 +2189,12 @@ void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) cons std::vector supportedTypes = { + DataType::BFloat16, + DataType::Float16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, - DataType::QSymmS16, - DataType::Float16, - DataType::BFloat16 + DataType::QSymmS16 }; ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName); @@ 
-2213,10 +2225,10 @@ void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const DataType::BFloat16, DataType::Float16, DataType::Float32, - DataType::Signed32, DataType::QAsymmS8, DataType::QAsymmU8, - DataType::QSymmS16 + DataType::QSymmS16, + DataType::Signed32 }; ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName); @@ -2246,6 +2258,7 @@ void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -2340,6 +2353,7 @@ void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -2363,6 +2377,7 @@ void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) con DataType::BFloat16, DataType::Float16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -2420,9 +2435,10 @@ void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const DataType::BFloat16, DataType::Float16, DataType::Float32, - DataType::Signed32, + DataType::QAsymmS8, DataType::QAsymmU8, - DataType::QSymmS16 + DataType::QSymmS16, + DataType::Signed32 }; ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName); @@ -2510,6 +2526,7 @@ void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const DataType::BFloat16, DataType::Float16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -2539,6 +2556,7 @@ void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const DataType::BFloat16, DataType::Float16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -2586,6 +2604,7 @@ void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadI DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -2678,6 +2697,7 @@ void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const { DataType::BFloat16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -2722,6 +2742,7 @@ void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const DataType::BFloat16, DataType::Float16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -2785,6 +2806,7 @@ void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloa DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -3010,6 +3032,7 @@ void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const DataType::BFloat16, DataType::Float16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16, DataType::Signed32 @@ -3092,6 +3115,7 @@ void DepthToSpaceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) con DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -3167,6 +3191,7 @@ void ElementwiseUnaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) DataType::BFloat16, DataType::Float16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16, DataType::Signed32 diff --git a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp index 
e6c6a96a9f..2cbc059044 100644 --- a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp @@ -114,6 +114,11 @@ Abs2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult, 2> +Abs2dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult, 2> Abs2dTest( armnn::IWorkloadFactory& workloadFactory, @@ -134,6 +139,11 @@ Abs3dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult, 3> +Abs3dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult, 3> Abs3dTest( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp index 20dcef5dd4..45ac05387a 100644 --- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp @@ -262,6 +262,11 @@ ArgMaxSimpleTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult +ArgMaxSimpleTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult ArgMaxSimpleTest( armnn::IWorkloadFactory& workloadFactory, @@ -282,6 +287,11 @@ ArgMinSimpleTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult +ArgMinSimpleTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult ArgMinSimpleTest( armnn::IWorkloadFactory& workloadFactory, @@ -302,6 +312,11 @@ ArgMinChannelTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult +ArgMinChannelTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult ArgMinChannelTest( armnn::IWorkloadFactory& workloadFactory, @@ -322,6 +337,11 @@ ArgMaxChannelTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult +ArgMaxChannelTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult ArgMaxChannelTest( armnn::IWorkloadFactory& workloadFactory, @@ -347,6 +367,11 @@ ArgMaxHeightTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult +ArgMaxHeightTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult ArgMaxHeightTest( armnn::IWorkloadFactory& workloadFactory, @@ -362,6 +387,11 @@ ArgMinWidthTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult +ArgMinWidthTest( + armnn::IWorkloadFactory& workloadFactory, + const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult ArgMinWidthTest( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp index c66027efdf..154ece2657 100644 --- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp @@ -1019,6 +1019,7 @@ LayerTestResult Convolution2d3x3DilationTestCommon( switch (ArmnnType) { case armnn::DataType::QAsymmU8: + case armnn::DataType::QAsymmS8: { qScale = 0.1f; qOffset = 128; @@ -2520,6 +2521,7 @@ LayerTestResult DepthwiseConvolution2d3x3DilationTestCommon( int32_t qOffset; switch (ArmnnType) { + case armnn::DataType::QAsymmS8: case armnn::DataType::QAsymmU8: { qScale = 0.1f; @@ -3022,6 +3024,13 @@ Convolution2d3x3Dilation3x3Test, 4> +Convolution2d3x3Dilation3x3Test( + armnn::IWorkloadFactory&, + const armnn::IBackendInternal::IMemoryManagerSharedPtr&, + bool, + armnn::DataLayout); + template LayerTestResult, 4> Convolution2d3x3Dilation3x3Test( armnn::IWorkloadFactory&, @@ -3050,6 +3059,13 @@ Convolution2d2x3x3Dilation3x3Test, 4> +Convolution2d2x3x3Dilation3x3Test( + armnn::IWorkloadFactory&, + const armnn::IBackendInternal::IMemoryManagerSharedPtr&, + bool, + armnn::DataLayout); + template LayerTestResult, 4> Convolution2d2x3x3Dilation3x3Test( armnn::IWorkloadFactory&, @@ -3078,6 +3094,13 @@ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test, 4> +Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test( + armnn::IWorkloadFactory &workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, + bool biasEnabled, + const armnn::DataLayout layout); + template LayerTestResult, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test( armnn::IWorkloadFactory &workloadFactory, @@ -3106,6 +3129,13 @@ DepthwiseConvolution2d3x3Dilation3x3Test, 4> +DepthwiseConvolution2d3x3Dilation3x3Test( + armnn::IWorkloadFactory&, + const armnn::IBackendInternal::IMemoryManagerSharedPtr&, + bool, + armnn::DataLayout); + template LayerTestResult, 4> DepthwiseConvolution2d3x3Dilation3x3Test( armnn::IWorkloadFactory&, @@ -3134,6 +3164,13 @@ DepthwiseConvolution2d2x3x3Dilation3x3Test, 4> +DepthwiseConvolution2d2x3x3Dilation3x3Test( + armnn::IWorkloadFactory&, + const armnn::IBackendInternal::IMemoryManagerSharedPtr&, + bool, + armnn::DataLayout); + template LayerTestResult, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test( armnn::IWorkloadFactory&, diff --git a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp index 4d4a6bc156..69994ddb03 100644 --- a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp @@ -307,6 +307,31 @@ DepthToSpaceTest4( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, armnn::DataLayout dataLayout); +// QuantisedAsymmS8 +template LayerTestResult, 4> +DepthToSpaceTest1( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + armnn::DataLayout dataLayout); + +template LayerTestResult, 4> +DepthToSpaceTest2( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + armnn::DataLayout dataLayout); + +template LayerTestResult, 4> +DepthToSpaceTest3( + armnn::IWorkloadFactory& 
workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + armnn::DataLayout dataLayout); + +template LayerTestResult, 4> +DepthToSpaceTest4( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + armnn::DataLayout dataLayout); + // QuantisedSymm16 template LayerTestResult, 4> DepthToSpaceTest1( diff --git a/src/backends/backendsCommon/test/layerTests/NegTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NegTestImpl.cpp index aace926dcb..f2ed22238e 100644 --- a/src/backends/backendsCommon/test/layerTests/NegTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/NegTestImpl.cpp @@ -133,6 +133,11 @@ Neg2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult, 2> +Neg2dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult, 2> Neg2dTest( armnn::IWorkloadFactory& workloadFactory, @@ -153,6 +158,11 @@ Neg3dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult, 3> +Neg3dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult, 3> Neg3dTest( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp index 5ed947d8c3..979d0a7f73 100644 --- a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp @@ -176,6 +176,11 @@ SimpleReshapeTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult, 4> +SimpleReshapeTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult, 4> SimpleReshapeTest( armnn::IWorkloadFactory& workloadFactory, @@ -191,6 +196,11 @@ Reshape5dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult, 5> +Reshape5dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult, 5> Reshape5dTest( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp index e95f18b7a5..f12f53c794 100644 --- a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp @@ -727,6 +727,71 @@ ResizeNearestNeighborMagTest( float outQuantScale, int32_t outQuantOffset); +// QAsymmS8 +template LayerTestResult, 4> +ResizeBilinearNopTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout); + +template LayerTestResult, 4> +SimpleResizeBilinearTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout); + +template LayerTestResult, 4> +ResizeBilinearSqMinTest( + armnn::IWorkloadFactory& 
workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout); + +template LayerTestResult, 4> +ResizeBilinearMinTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout); + +template LayerTestResult, 4> +ResizeBilinearMagTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout); + +template LayerTestResult, 4> +ResizeNearestNeighborNopTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout); + +template LayerTestResult, 4> +SimpleResizeNearestNeighborTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout); + +template LayerTestResult, 4> +ResizeNearestNeighborSqMinTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout); + +template LayerTestResult, 4> +ResizeNearestNeighborMinTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout); + +template LayerTestResult, 4> +ResizeNearestNeighborMagTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout, + float inQuantScale, + int32_t inQuantOffset, + float outQuantScale, + int32_t outQuantOffset); + // QSymm16 template LayerTestResult, 4> ResizeBilinearNopTest( diff --git a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp index ca423835dc..367c82fb7c 100644 --- a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp @@ -133,6 +133,11 @@ Rsqrt2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult, 2> +Rsqrt2dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult, 2> Rsqrt2dTest( armnn::IWorkloadFactory& workloadFactory, @@ -153,6 +158,11 @@ Rsqrt3dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult, 3> +Rsqrt3dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult, 3> Rsqrt3dTest( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp index 07f52584ca..813c623cff 100644 --- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp @@ -671,6 +671,13 @@ SimpleTransposeConvolution2dTest, 4> +SimpleTransposeConvolution2dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + bool biasEnabled, + const armnn::DataLayout 
layout); + template LayerTestResult, 4> SimpleTransposeConvolution2dTest( armnn::IWorkloadFactory& workloadFactory, @@ -692,6 +699,13 @@ PaddedTransposeConvolution2dTest, 4> +PaddedTransposeConvolution2dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + bool biasEnabled, + const armnn::DataLayout layout); + template LayerTestResult, 4> PaddedTransposeConvolution2dTest( armnn::IWorkloadFactory& workloadFactory, @@ -713,6 +727,13 @@ StridedTransposeConvolution2dTest, 4> +StridedTransposeConvolution2dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + bool biasEnabled, + const armnn::DataLayout layout); + template LayerTestResult, 4> StridedTransposeConvolution2dTest( armnn::IWorkloadFactory& workloadFactory, @@ -733,6 +754,12 @@ MultiChannelTransposeConvolution2dTest, 4> +MultiChannelTransposeConvolution2dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout layout); + template LayerTestResult, 4> MultiChannelTransposeConvolution2dTest( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp index 509da41f81..ce4496e71b 100644 --- a/src/backends/cl/test/ClLayerTests.cpp +++ b/src/backends/cl/test/ClLayerTests.cpp @@ -81,6 +81,14 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat321, BatchToSpaceNdNchwTest1) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat323, BatchToSpaceNdNchwTest3) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt1, BatchToSpaceNdNhwcTest1) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt2, BatchToSpaceNdNhwcTest2) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt3, BatchToSpaceNdNhwcTest3) + +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt1, BatchToSpaceNdNchwTest1) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt2, BatchToSpaceNdNchwTest2) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt3, BatchToSpaceNdNchwTest3) + ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3) @@ -414,6 +422,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_2, DepthToSpaceTest2, DataLayout::NCHW); ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4, DataLayout::NCHW); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_1, DepthToSpaceTest1, DataLayout::NCHW); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_2, DepthToSpaceTest2, DataLayout::NCHW); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_3, DepthToSpaceTest3, DataLayout::NCHW); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_4, DepthToSpaceTest4, DataLayout::NCHW); + ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1, DataLayout::NCHW); ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2, DataLayout::NCHW); ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3, DataLayout::NCHW); @@ -434,6 +447,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_2, DepthToSpaceTest2, DataLayout::NHWC); ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4, DataLayout::NHWC); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_1, DepthToSpaceTest1, DataLayout::NHWC); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_2, DepthToSpaceTest2, DataLayout::NHWC); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_3, DepthToSpaceTest3, DataLayout::NHWC); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_4, DepthToSpaceTest4, 
DataLayout::NHWC); + ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1, DataLayout::NHWC); ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2, DataLayout::NHWC); ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3, DataLayout::NHWC); @@ -449,6 +467,7 @@ ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest) // Reshape ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest) +ARMNN_AUTO_TEST_CASE(SimpleReshapeInt8, SimpleReshapeTest) ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest) ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest) @@ -477,6 +496,10 @@ ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest) ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test) ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test) ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test) +ARMNN_AUTO_TEST_CASE(SimplePermuteQASymmS8, SimplePermuteTest) +ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet1Test, PermuteValueSet1Test) +ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet2Test, PermuteValueSet2Test) +ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet3Test, PermuteValueSet3Test) ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest) ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test) ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test) @@ -519,6 +542,14 @@ ARMNN_AUTO_TEST_CASE(MeanVts1Float32, MeanVts1Test) ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test) ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test) +ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymmS8, MeanSimpleTest) +ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymmS8, MeanSimpleAxisTest) +ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymmS8, MeanKeepDimsTest) +ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymmS8, MeanMultipleDimsTest) +ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymmS8, MeanVts1Test) +ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymmS8, MeanVts2Test) +ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymmS8, MeanVts3Test) + ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest) ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest) ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest) @@ -733,24 +764,36 @@ ARMNN_AUTO_TEST_CASE(StridedSlice2dReverseUint8, StridedSlice2dReverseUint8Test) ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearInt8, + SimpleResizeBilinearTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8, SimpleResizeBilinearTest, DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeBilinearNopInt8, + ResizeBilinearNopTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8, ResizeBilinearNopTest, DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinInt8, + ResizeBilinearSqMinTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8, ResizeBilinearSqMinTest, DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMinInt8, + ResizeBilinearMinTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8, ResizeBilinearMinTest, DataLayout::NCHW) @@ -759,24 +802,36 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8, ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopTest, DataLayout::NHWC) 
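The explicit template instantiations added to the *TestImpl.cpp files earlier in the patch, and the ARMNN_AUTO_TEST_CASE registrations in the backend test files, both rely on mapping a DataType enum value to its C++ storage type at compile time (ArmNN does this with its ResolveType trait). The cut-down illustration below shows only the idea, assuming signed 8-bit storage for QAsymmS8; it is not the library's implementation.

    // Simplified illustration of the DataType -> storage-type mapping that lets
    // one templated layer test cover float, uint8_t and the new int8_t cases.
    #include <cstdint>
    #include <type_traits>

    enum class DataType { Float32, QAsymmS8, QAsymmU8, QSymmS16 };

    template <DataType DT> struct ResolveTypeImpl;
    template <> struct ResolveTypeImpl<DataType::Float32>  { using Type = float;   };
    template <> struct ResolveTypeImpl<DataType::QAsymmS8> { using Type = int8_t;  }; // signed 8-bit storage
    template <> struct ResolveTypeImpl<DataType::QAsymmU8> { using Type = uint8_t; };
    template <> struct ResolveTypeImpl<DataType::QSymmS16> { using Type = int16_t; };

    template <DataType DT>
    using ResolveType = typename ResolveTypeImpl<DT>::Type;

    static_assert(std::is_same<ResolveType<DataType::QAsymmS8>, int8_t>::value,
                  "a test instantiated for QAsymmS8 produces 8-bit signed results");

    int main() { return 0; }

This is why the new instantiations in Conv2dTestImpl.cpp and friends are declared in terms of the signed 8-bit storage type even though the tests themselves are parameterised on the QAsymmS8 enum value.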
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopInt8Nhwc, + ResizeBilinearNopTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc, ResizeBilinearNopTest, DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearInt8Nhwc, + SimpleResizeBilinearTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc, SimpleResizeBilinearTest, DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinInt8Nhwc, + ResizeBilinearSqMinTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc, ResizeBilinearSqMinTest, DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMinInt8Nhwc, + ResizeBilinearMinTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc, ResizeBilinearMinTest, DataLayout::NHWC) @@ -785,30 +840,45 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc, ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor, SimpleResizeNearestNeighborTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborInt8, + SimpleResizeNearestNeighborTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8, SimpleResizeNearestNeighborTest, DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop, ResizeNearestNeighborNopTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopInt8, + ResizeNearestNeighborNopTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8, ResizeNearestNeighborNopTest, DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin, ResizeNearestNeighborSqMinTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinInt8, + ResizeNearestNeighborSqMinTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8, ResizeNearestNeighborSqMinTest, DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin, ResizeNearestNeighborMinTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinInt8, + ResizeNearestNeighborMinTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8, ResizeNearestNeighborMinTest, DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag, ResizeNearestNeighborMagTest, DataLayout::NCHW, 0.1f, 50, 0.1f, 50) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagInt8, + ResizeNearestNeighborMagTest, + DataLayout::NCHW, 0.1f, 50, 0.1f, 50) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8, ResizeNearestNeighborMagTest, DataLayout::NCHW, 0.1f, 50, 0.1f, 50) @@ -817,30 +887,45 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc, ResizeNearestNeighborNopTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopInt8Nhwc, + ResizeNearestNeighborNopTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc, ResizeNearestNeighborNopTest, DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc, SimpleResizeNearestNeighborTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborInt8Nhwc, + SimpleResizeNearestNeighborTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc, SimpleResizeNearestNeighborTest, DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc, ResizeNearestNeighborSqMinTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinInt8Nhwc, + 
ResizeNearestNeighborSqMinTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc, ResizeNearestNeighborSqMinTest, DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc, ResizeNearestNeighborMinTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinInt8Nhwc, + ResizeNearestNeighborMinTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc, ResizeNearestNeighborMinTest, DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc, ResizeNearestNeighborMagTest, DataLayout::NHWC, 0.1f, 50, 0.1f, 50) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagInt8Nhwc, + ResizeNearestNeighborMagTest, + DataLayout::NHWC, 0.1f, 50, 0.1f, 50) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc, ResizeNearestNeighborMagTest, DataLayout::NHWC, 0.1f, 50, 0.1f, 50) @@ -867,6 +952,10 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeFloat32, SimpleTransposeTest) ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet2Test, TransposeValueSet2Test) ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet3Test, TransposeValueSet3Test) +ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymmS8, SimpleTransposeTest) +ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet1Test, TransposeValueSet1Test) +ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet2Test, TransposeValueSet2Test) +ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet3Test, TransposeValueSet3Test) ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymm8, SimpleTransposeTest) ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet1Test, TransposeValueSet1Test) ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet2Test, TransposeValueSet2Test) diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp index f4df76c41e..f992bd61a1 100644 --- a/src/backends/neon/test/NeonLayerTests.cpp +++ b/src/backends/neon/test/NeonLayerTests.cpp @@ -36,6 +36,14 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat321, BatchToSpaceNdNchwTest1) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat323, BatchToSpaceNdNchwTest3) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt1, BatchToSpaceNdNhwcTest1) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt2, BatchToSpaceNdNhwcTest2) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt3, BatchToSpaceNdNhwcTest3) + +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt1, BatchToSpaceNdNchwTest1) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt2, BatchToSpaceNdNchwTest2) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt3, BatchToSpaceNdNchwTest3) + ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3) @@ -74,6 +82,14 @@ ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Nhwc, Convolution2d3x3Dilation3x3Test, false, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Int8, + Convolution2d3x3Dilation3x3Test, + false, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcInt8, + Convolution2d3x3Dilation3x3Test, + false, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8, Convolution2d3x3Dilation3x3Test, false, @@ -90,6 +106,14 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Nhwc, Convolution2d2x3x3Dilation3x3Test, false, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Int8, + Convolution2d2x3x3Dilation3x3Test, + false, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcInt8, + Convolution2d2x3x3Dilation3x3Test, + false, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8, 
Convolution2d2x3x3Dilation3x3Test, false, @@ -108,6 +132,16 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc, , false, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Int8, + Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test + , + false, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcInt8, + Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test + , + false, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8, Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test , @@ -141,6 +175,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_2, DepthToSpaceTest2, DataLayout::NCHW); ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4, DataLayout::NCHW); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_1, DepthToSpaceTest1, DataLayout::NCHW); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_2, DepthToSpaceTest2, DataLayout::NCHW); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_3, DepthToSpaceTest3, DataLayout::NCHW); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_4, DepthToSpaceTest4, DataLayout::NCHW); + ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1, DataLayout::NCHW); ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2, DataLayout::NCHW); ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3, DataLayout::NCHW); @@ -161,6 +200,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_2, DepthToSpaceTest2, DataLayout::NHWC); ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4, DataLayout::NHWC); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_1, DepthToSpaceTest1, DataLayout::NHWC); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_2, DepthToSpaceTest2, DataLayout::NHWC); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_3, DepthToSpaceTest3, DataLayout::NHWC); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_4, DepthToSpaceTest4, DataLayout::NHWC); + ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1, DataLayout::NHWC); ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2, DataLayout::NHWC); ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3, DataLayout::NHWC); @@ -685,6 +729,7 @@ ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1dVectorUint8, NotEqualBroadcast1dVectorUi // Reshape ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest) +ARMNN_AUTO_TEST_CASE(SimpleReshapeInt8, SimpleReshapeTest) ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest) ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest) @@ -709,6 +754,10 @@ ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest) ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test) ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test) ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test) +ARMNN_AUTO_TEST_CASE(SimplePermuteQASymmS8, SimplePermuteTest) +ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet1Test, PermuteValueSet1Test) +ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet2Test, PermuteValueSet2Test) +ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet3Test, PermuteValueSet3Test) ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest) ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test) ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test) @@ -735,6 +784,14 @@ ARMNN_AUTO_TEST_CASE(MeanVts1Float32, MeanVts1Test) ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test) ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test) +ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymmS8, MeanSimpleTest) 
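TENSOR_QUANT8_ASYMM_SIGNED (QAsymmS8) uses the same affine quantization scheme as QAsymmU8, only with a signed storage range: real = scale * (quantized - offset), with the quantized value clamped to [-128, 127] rather than [0, 255]. The generic sketch below shows that arithmetic; the scale and offset constants are illustrative only and are not taken from these tests.

    // Generic sketch of 8-bit asymmetric (affine) quantization, the arithmetic
    // behind QAsymmS8 (signed) versus QAsymmU8 (unsigned). Not ArmNN code.
    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int8_t QuantizeS8(float value, float scale, int32_t offset)
    {
        int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
        return static_cast<int8_t>(std::max(-128, std::min(127, q))); // signed 8-bit range
    }

    float DequantizeS8(int8_t value, float scale, int32_t offset)
    {
        return scale * static_cast<float>(static_cast<int32_t>(value) - offset);
    }

    int main()
    {
        const float   scale  = 0.1f; // example values only
        const int32_t offset = 10;

        const int8_t q = QuantizeS8(2.0f, scale, offset); // 2.0 / 0.1 + 10 = 30
        std::printf("quantized=%d dequantized=%.1f\n", q, DequantizeS8(q, scale, offset));
        return 0;
    }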
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymmS8, MeanSimpleAxisTest) +ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymmS8, MeanKeepDimsTest) +ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymmS8, MeanMultipleDimsTest) +ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymmS8, MeanVts1Test) +ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymmS8, MeanVts2Test) +ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymmS8, MeanVts3Test) + ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest) ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest) ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest) @@ -968,6 +1025,10 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeFloat32, SimpleTransposeTest) ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet2Test, TransposeValueSet2Test) ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet3Test, TransposeValueSet3Test) +ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymms8, SimpleTransposeTest) +ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet1Test, TransposeValueSet1Test) +ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet2Test, TransposeValueSet2Test) +ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet3Test, TransposeValueSet3Test) ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymm8, SimpleTransposeTest) ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet1Test, TransposeValueSet1Test) ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet2Test, TransposeValueSet2Test) @@ -1037,6 +1098,14 @@ ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dFloatNhwc, PaddedTransposeConvolution2dTest, true, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt8Nchw, + PaddedTransposeConvolution2dTest, + true, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt8Nhwc, + PaddedTransposeConvolution2dTest, + true, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nchw, PaddedTransposeConvolution2dTest, true, @@ -1116,6 +1185,13 @@ ARMNN_AUTO_TEST_CASE(ArgMaxChannel, ArgMaxChannelTest) ARMNN_AUTO_TEST_CASE(ArgMaxHeight, ArgMaxHeightTest) ARMNN_AUTO_TEST_CASE(ArgMinWidth, ArgMinWidthTest) +ARMNN_AUTO_TEST_CASE(ArgMinQAsymmS8, ArgMinSimpleTest) +ARMNN_AUTO_TEST_CASE(ArgMaxQAsymmS8, ArgMaxSimpleTest) +ARMNN_AUTO_TEST_CASE(ArgMinChannelQAsymmS8, ArgMinChannelTest) +ARMNN_AUTO_TEST_CASE(ArgMaxChannelQAsymmS8, ArgMaxChannelTest) +ARMNN_AUTO_TEST_CASE(ArgMaxHeightQAsymmS8, ArgMaxHeightTest) +ARMNN_AUTO_TEST_CASE(ArgMinWidthQAsymmS8, ArgMinWidthTest) + ARMNN_AUTO_TEST_CASE(ArgMinQAsymm8, ArgMinSimpleTest) ARMNN_AUTO_TEST_CASE(ArgMaxQAsymm8, ArgMaxSimpleTest) ARMNN_AUTO_TEST_CASE(ArgMinChannelQAsymm8, ArgMinChannelTest) diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp index 25d639a38a..65ae14ff40 100644 --- a/src/backends/reference/RefLayerSupport.cpp +++ b/src/backends/reference/RefLayerSupport.cpp @@ -181,10 +181,11 @@ bool RefLayerSupport::IsArgMinMaxSupported(const armnn::TensorInfo &input, const { IgnoreUnused(descriptor); - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16, DataType::Signed32 @@ -211,11 +212,12 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input, { IgnoreUnused(descriptor); - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -260,11 +262,12 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const 
TensorInfo& input, std::string outputTensorStr = "output"; // Define supported types. - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -302,12 +305,13 @@ bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0, Optional reasonIfUnsupported) const { IgnoreUnused(descriptor); - std::array supportedInputTypes = + std::array supportedInputTypes = { DataType::Boolean, DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16, DataType::Signed32 @@ -339,8 +343,8 @@ bool RefLayerSupport::IsConcatSupported(const std::vector inp DataType::BFloat16, DataType::Float32, DataType::Float16, - DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS16 }; @@ -366,11 +370,11 @@ bool RefLayerSupport::IsConstantSupported(const TensorInfo& output, { DataType::BFloat16, DataType::Float32, - DataType::Signed32, - DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS8, - DataType::QSymmS16 + DataType::QSymmS16, + DataType::Signed32 }; return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported, @@ -462,8 +466,8 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input, DataType::BFloat16, DataType::Float32, DataType::Float16, - DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS8, DataType::QSymmS16 }; @@ -495,9 +499,9 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input, ARMNN_NO_DEPRECATE_WARN_BEGIN std::array supportedWeightTypes = { + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS8, - DataType::QAsymmS8, DataType::QuantizedSymm8PerAxis // deprecated }; ARMNN_NO_DEPRECATE_WARN_END @@ -543,8 +547,8 @@ bool RefLayerSupport::IsDebugSupported(const TensorInfo& input, DataType::BFloat16, DataType::Float16, DataType::Float32, - DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS8, DataType::QSymmS16, DataType::Signed32 @@ -570,11 +574,12 @@ bool RefLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input, IgnoreUnused(descriptor); bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -598,6 +603,7 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input, const Optional& biases, Optional reasonIfUnsupported) const { + IgnoreUnused(descriptor); bool supported = true; // Define supported types. 
@@ -606,9 +612,9 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input, DataType::BFloat16, DataType::Float32, DataType::Float16, - DataType::QSymmS8, DataType::QAsymmS8, DataType::QAsymmU8, + DataType::QSymmS8, DataType::QSymmS16 }; @@ -621,21 +627,22 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input, supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, "Reference DepthwiseConvolution2d: input and output types mismatched."); - ARMNN_NO_DEPRECATE_WARN_BEGIN - std::array supportedWeightTypes = - { - DataType::QAsymmU8, - DataType::QSymmS8, - DataType::QuantizedSymm8PerAxis // deprecated - }; - ARMNN_NO_DEPRECATE_WARN_END - const DataType inputType = input.GetDataType(); if (IsQuantized8BitType(inputType)) { + ARMNN_NO_DEPRECATE_WARN_BEGIN + std::array supportedWeightTypes = + { + DataType::QAsymmS8, + DataType::QAsymmU8, + DataType::QSymmS8, + DataType::QuantizedSymm8PerAxis // deprecated + }; + ARMNN_NO_DEPRECATE_WARN_END supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported, - "Reference convolution2d: weights type not supported for quantized input."); + "Reference DepthwiseConvolution2d: weights type not supported for " + "quantized input."); } else { @@ -658,7 +665,6 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input, supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported, "Reference DepthwiseConvolution2d: biases is not a supported type."); } - IgnoreUnused(descriptor); return supported; @@ -716,10 +722,11 @@ bool RefLayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncod bool supported = true; - std::array supportedInputTypes = + std::array supportedInputTypes = { DataType::BFloat16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -750,10 +757,11 @@ bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0, { bool supported = true; - std::array supportedTypes = { + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -786,11 +794,12 @@ bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input, { IgnoreUnused(descriptor); - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16, DataType::Signed32 @@ -883,8 +892,8 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input, DataType::BFloat16, DataType::Float32, DataType::Float16, - DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS16 }; @@ -913,8 +922,9 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input, } ARMNN_NO_DEPRECATE_WARN_BEGIN - std::array supportedWeightTypes = + std::array supportedWeightTypes = { + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS8, DataType::QuantizedSymm8PerAxis // deprecated @@ -969,11 +979,12 @@ bool RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0, armnn::Optional reasonIfUnsupported) const { bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1050,11 +1061,12 @@ bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input, { 
IgnoreUnused(descriptor); // Define supported types - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1261,11 +1273,12 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input, std::string meanLayerStr = "Mean"; std::string outputTensorStr = "output"; - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1328,11 +1341,12 @@ bool RefLayerSupport::IsMemCopySupported(const TensorInfo &input, { bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16, DataType::Boolean @@ -1357,10 +1371,11 @@ bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0, { bool supported = true; - std::array supportedTypes = { + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1397,8 +1412,8 @@ bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0, DataType::BFloat16, DataType::Float32, DataType::Float16, - DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1431,11 +1446,12 @@ bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input, IgnoreUnused(descriptor); // Define supported types - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1470,11 +1486,12 @@ bool RefLayerSupport::IsPadSupported(const TensorInfo& input, bool supported = true; // Define supported output and inputs types. - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1500,11 +1517,12 @@ bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input, bool supported = true; // Define supported output and inputs types. - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1574,8 +1592,8 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input, // Define supported output types. 
std::array supportedOutputTypes = { - DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS8, DataType::QSymmS16 }; @@ -1616,11 +1634,12 @@ bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input, Optional reasonIfUnsupported) const { bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1649,8 +1668,8 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input, DataType::BFloat16, DataType::Float32, DataType::Float16, - DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1684,10 +1703,11 @@ bool RefLayerSupport::IsSliceSupported(const TensorInfo& input, IgnoreUnused(descriptor); bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1741,11 +1761,12 @@ bool RefLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input, { IgnoreUnused(descriptor); bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1771,11 +1792,12 @@ bool RefLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input, IgnoreUnused(descriptor); bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1798,11 +1820,12 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input, { IgnoreUnused(descriptor); bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1820,11 +1843,12 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input, { IgnoreUnused(descriptor); bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1851,11 +1875,12 @@ bool RefLayerSupport::IsStackSupported(const std::vector& inp IgnoreUnused(descriptor); bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1883,10 +1908,11 @@ bool RefLayerSupport::IsStridedSliceSupported(const TensorInfo& input, IgnoreUnused(descriptor); bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1910,10 +1936,11 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0, { bool supported = true; - std::array supportedTypes = { + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1946,11 +1973,12 @@ bool RefLayerSupport::IsPreluSupported(const TensorInfo& input, { bool supported = true; - std::array supportedTypes + std::array supportedTypes { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; 
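The RefLayerSupport.cpp changes are equally mechanical: each IsXxxSupported query keeps a fixed-size std::array of supported data types, and the patch adds QAsymmS8 and bumps the array's size parameter, which is why so many hunks touch only the std::array declaration and one list entry. A stand-alone sketch of that shape follows; CheckSupportRule and TypeAnyOf are the helper names the real file uses, but the versions here are simplified stand-ins.

    // Simplified stand-in for the RefLayerSupport.cpp pattern: test the tensor's
    // type against a fixed-size allow-list and report a reason on failure.
    #include <array>
    #include <cstddef>
    #include <string>

    enum class DataType { BFloat16, Float16, Float32, QAsymmS8, QAsymmU8, QSymmS16 };

    struct TensorInfoStub { DataType dataType; }; // stands in for armnn::TensorInfo

    template <std::size_t N>
    bool TypeAnyOf(const TensorInfoStub& info, const std::array<DataType, N>& supportedTypes)
    {
        for (DataType type : supportedTypes)
        {
            if (type == info.dataType) { return true; }
        }
        return false;
    }

    bool IsExampleLayerSupported(const TensorInfoStub& input, std::string* reasonIfUnsupported)
    {
        bool supported = true;

        // Adding QAsymmS8 here (and growing the array size by one) is what most
        // of the RefLayerSupport hunks in this patch do.
        const std::array<DataType, 6> supportedTypes =
        {
            DataType::BFloat16,
            DataType::Float16,
            DataType::Float32,
            DataType::QAsymmS8, // newly accepted by the reference backend
            DataType::QAsymmU8,
            DataType::QSymmS16
        };

        if (!TypeAnyOf(input, supportedTypes))
        {
            supported = false;
            if (reasonIfUnsupported != nullptr)
            {
                *reasonIfUnsupported = "Reference: input type not supported.";
            }
        }
        return supported;
    }

    int main()
    {
        std::string reason;
        const TensorInfoStub input{ DataType::QAsymmS8 };
        return IsExampleLayerSupported(input, &reason) ? 0 : 1;
    }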
@@ -1983,12 +2011,14 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input, IgnoreUnused(descriptor); bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, + DataType::QSymmS8, DataType::QSymmS16 }; @@ -2003,11 +2033,12 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input, const DataType inputType = input.GetDataType(); - if (inputType == DataType::QAsymmU8) + if (IsQuantized8BitType(inputType)) { ARMNN_NO_DEPRECATE_WARN_BEGIN - std::array supportedWeightTypes = + std::array supportedWeightTypes = { + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS8, DataType::QuantizedSymm8PerAxis //Deprecated @@ -2052,11 +2083,12 @@ bool RefLayerSupport::IsTransposeSupported(const TensorInfo& input, bool supported = true; // Define supported output and inputs types. - std::array supportedTypes = + std::array supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp index 5d3775a59d..4566fe5e40 100644 --- a/src/backends/reference/RefWorkloadFactory.cpp +++ b/src/backends/reference/RefWorkloadFactory.cpp @@ -468,6 +468,10 @@ std::unique_ptr RefWorkloadFactory::CreatePad(const PadQueueDescripto { return std::make_unique(descriptor, info); } + else if (IsQAsymmS8(info)) + { + return std::make_unique(descriptor, info); + } return MakeWorkload(descriptor, info); } @@ -482,6 +486,10 @@ std::unique_ptr RefWorkloadFactory::CreatePermute(const PermuteQueueD { return std::make_unique(descriptor, info); } + else if (IsQAsymmS8(info)) + { + return std::make_unique(descriptor, info); + } return MakeWorkloadHelper(descriptor, info); } @@ -603,6 +611,10 @@ std::unique_ptr RefWorkloadFactory::CreateTranspose(const TransposeQu { return std::make_unique(descriptor, info); } + else if (IsQAsymmS8(info)) + { + return std::make_unique(descriptor, info); + } return MakeWorkloadHelper(descriptor, info); } diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp index bcace79493..f50051aaac 100644 --- a/src/backends/reference/test/RefLayerTests.cpp +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -86,6 +86,14 @@ ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Nhwc, Convolution2d3x3Dilation3x3Test, false, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Int8, + Convolution2d3x3Dilation3x3Test, + false, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcInt8, + Convolution2d3x3Dilation3x3Test, + false, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8, Convolution2d3x3Dilation3x3Test, false, @@ -119,6 +127,14 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Nhwc, Convolution2d2x3x3Dilation3x3Test, false, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Int8, + Convolution2d2x3x3Dilation3x3Test, + false, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcInt8, + Convolution2d2x3x3Dilation3x3Test, + false, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8, Convolution2d2x3x3Dilation3x3Test, false, @@ -152,6 +168,14 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc, Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test, false, 
DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Int8, + Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test, + false, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcInt8, + Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test, + false, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8, Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test, false, @@ -217,6 +241,14 @@ ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcBFloat16, DepthwiseConvolution2d3x3Dilation3x3Test, false, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Int8, + DepthwiseConvolution2d3x3Dilation3x3Test, + false, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcInt8, + DepthwiseConvolution2d3x3Dilation3x3Test, + false, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Uint8, DepthwiseConvolution2d3x3Dilation3x3Test, false, @@ -250,6 +282,14 @@ ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcBFloat16, DepthwiseConvolution2d2x3x3Dilation3x3Test, false, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Int8, + DepthwiseConvolution2d2x3x3Dilation3x3Test, + false, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcInt8, + DepthwiseConvolution2d2x3x3Dilation3x3Test, + false, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Uint8, DepthwiseConvolution2d2x3x3Dilation3x3Test, false, @@ -743,6 +783,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearFloat16, SimpleResizeBilinearTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearInt8, + SimpleResizeBilinearTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8, SimpleResizeBilinearTest, DataLayout::NCHW) @@ -755,6 +798,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ARMNN_AUTO_TEST_CASE(ResizeBilinearNopFloat16, ResizeBilinearNopTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeBilinearNopInt8, + ResizeBilinearNopTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8, ResizeBilinearNopTest, DataLayout::NCHW) @@ -767,6 +813,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinFloat16, ResizeBilinearSqMinTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinInt8, + ResizeBilinearSqMinTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8, ResizeBilinearSqMinTest, DataLayout::NCHW) @@ -779,6 +828,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ARMNN_AUTO_TEST_CASE(ResizeBilinearMinFloat16, ResizeBilinearMinTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMinInt8, + ResizeBilinearMinTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8, ResizeBilinearMinTest, DataLayout::NCHW) @@ -791,6 +843,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ARMNN_AUTO_TEST_CASE(ResizeBilinearMagFloat16, ResizeBilinearMagTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMagInt8, + ResizeBilinearMagTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8, ResizeBilinearMagTest, DataLayout::NCHW) @@ -805,6 +860,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwcFloat16, ResizeBilinearNopTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeBilinearNopInt8Nhwc, + ResizeBilinearNopTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc, 
ResizeBilinearNopTest, DataLayout::NHWC) @@ -817,6 +875,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwcFloat16, SimpleResizeBilinearTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearInt8Nhwc, + SimpleResizeBilinearTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc, SimpleResizeBilinearTest, DataLayout::NHWC) @@ -829,6 +890,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwcFloat16, ResizeBilinearSqMinTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinInt8Nhwc, + ResizeBilinearSqMinTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc, ResizeBilinearSqMinTest, DataLayout::NHWC) @@ -841,6 +905,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwcFloat16, ResizeBilinearMinTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMinInt8Nhwc, + ResizeBilinearMinTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc, ResizeBilinearMinTest, DataLayout::NHWC) @@ -853,6 +920,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwcFloat16, ResizeBilinearMagTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMagInt8Nhwc, + ResizeBilinearMagTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc, ResizeBilinearMagTest, DataLayout::NHWC) @@ -864,6 +934,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint16Nhwc, ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor, SimpleResizeNearestNeighborTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborInt8, + SimpleResizeNearestNeighborTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8, SimpleResizeNearestNeighborTest, DataLayout::NCHW) @@ -873,6 +946,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop, ResizeNearestNeighborNopTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopInt8, + ResizeNearestNeighborNopTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8, ResizeNearestNeighborNopTest, DataLayout::NCHW) @@ -882,6 +958,9 @@ ARMNN_AUTO_TEST_CASE(esizeNearestNeighborNopUint16, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin, ResizeNearestNeighborSqMinTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinInt8, + ResizeNearestNeighborSqMinTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8, ResizeNearestNeighborSqMinTest, DataLayout::NCHW) @@ -891,6 +970,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin, ResizeNearestNeighborMinTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinInt8, + ResizeNearestNeighborMinTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8, ResizeNearestNeighborMinTest, DataLayout::NCHW) @@ -900,6 +982,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag, ResizeNearestNeighborMagTest, DataLayout::NCHW, 0.10f, 50, 0.11f, 20) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagInt8, + ResizeNearestNeighborMagTest, + DataLayout::NCHW, 0.10f, 50, 0.11f, 20) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8, ResizeNearestNeighborMagTest, DataLayout::NCHW, 0.10f, 50, 0.11f, 20) @@ -911,6 +996,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc, ResizeNearestNeighborNopTest, 
DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopInt8Nhwc, + ResizeNearestNeighborNopTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc, ResizeNearestNeighborNopTest, DataLayout::NHWC) @@ -920,6 +1008,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint16Nhwc, ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc, SimpleResizeNearestNeighborTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborInt8Nhwc, + SimpleResizeNearestNeighborTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc, SimpleResizeNearestNeighborTest, DataLayout::NHWC) @@ -929,6 +1020,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16Nhwc, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc, ResizeNearestNeighborSqMinTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinInt8Nhwc, + ResizeNearestNeighborSqMinTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc, ResizeNearestNeighborSqMinTest, DataLayout::NHWC) @@ -938,6 +1032,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16Nhwc, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc, ResizeNearestNeighborMinTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinInt8Nhwc, + ResizeNearestNeighborMinTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc, ResizeNearestNeighborMinTest, DataLayout::NHWC) @@ -947,6 +1044,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16Nhwc, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc, ResizeNearestNeighborMagTest, DataLayout::NHWC, 0.10f, 50, 0.11f, 20) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagInt8Nhwc, + ResizeNearestNeighborMagTest, + DataLayout::NHWC, 0.10f, 50, 0.11f, 20) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc, ResizeNearestNeighborMagTest, DataLayout::NHWC, 0.10f, 50, 0.11f, 20) @@ -1083,6 +1183,7 @@ ARMNN_AUTO_TEST_CASE(SimpleFloorQuantisedSymm16, SimpleFloorTest) +ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymmS8, SimpleReshapeTest) ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymm8, SimpleReshapeTest) ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedSymm16, SimpleReshapeTest) ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest) @@ -1094,6 +1195,8 @@ ARMNN_AUTO_TEST_CASE(RsqrtZero, RsqrtZeroTest) ARMNN_AUTO_TEST_CASE(RsqrtNegative, RsqrtNegativeTest) ARMNN_AUTO_TEST_CASE(Rsqrt2dFloat16, Rsqrt2dTest) ARMNN_AUTO_TEST_CASE(Rsqrt3dFloat16, Rsqrt3dTest) +ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedAsymmS8, Rsqrt2dTest) +ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedAsymmS8, Rsqrt3dTest) ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedAsymm8, Rsqrt2dTest) ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedAsymm8, Rsqrt3dTest) ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedSymm16, Rsqrt2dTest) @@ -1108,6 +1211,10 @@ ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest) ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test) ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test) ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test) +ARMNN_AUTO_TEST_CASE(SimplePermuteQASymS8, SimplePermuteTest) +ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet1Test, PermuteValueSet1Test) +ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet2Test, PermuteValueSet2Test) +ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet3Test, PermuteValueSet3Test) ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest) ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test) ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test) @@ -1168,6 
+1275,14 @@ ARMNN_AUTO_TEST_CASE(MeanVts1Float32, MeanVts1Test) ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test) ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test) +ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymmS8, MeanSimpleTest) +ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymmS8, MeanSimpleAxisTest) +ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymmS8, MeanKeepDimsTest) +ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymmS8, MeanMultipleDimsTest) +ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymmS8, MeanVts1Test) +ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymmS8, MeanVts2Test) +ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymmS8, MeanVts3Test) + ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest) ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest) ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest) @@ -1201,6 +1316,11 @@ ARMNN_AUTO_TEST_CASE(ArgMaxChannelSigned32, ArgMaxChannelTest) ARMNN_AUTO_TEST_CASE(ArgMinWidthSigned32, ArgMinWidthTest) +ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedAsymmS8, ArgMaxSimpleTest) +ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedAsymmS8, ArgMinSimpleTest) +ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedAsymmS8, ArgMinChannelTest) +ARMNN_AUTO_TEST_CASE(ArgMaxChannelQuantisedAsymmS8, ArgMaxChannelTest) + ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedAsymm8, ArgMaxSimpleTest) ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedAsymm8, ArgMinSimpleTest) ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedAsymm8, ArgMinChannelTest) @@ -1269,6 +1389,14 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_5, BatchToSpaceNdNhwcTest5) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_7, BatchToSpaceNdNhwcTest7) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt1, BatchToSpaceNdNhwcTest1) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt2, BatchToSpaceNdNhwcTest2) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt3, BatchToSpaceNdNhwcTest3) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt4, BatchToSpaceNdNhwcTest4) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt5, BatchToSpaceNdNhwcTest5) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt6, BatchToSpaceNdNhwcTest6) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt7, BatchToSpaceNdNhwcTest7) + ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3) @@ -1293,6 +1421,14 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_5, BatchToSpaceNdNchwTest5) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_7, BatchToSpaceNdNchwTest7) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt1, BatchToSpaceNdNchwTest1) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt2, BatchToSpaceNdNchwTest2) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt3, BatchToSpaceNdNchwTest3) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt4, BatchToSpaceNdNchwTest4) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt5, BatchToSpaceNdNchwTest5) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt6, BatchToSpaceNdNchwTest6) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt7, BatchToSpaceNdNchwTest7) + ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3) @@ -1320,6 +1456,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_2, DepthToSpaceTest2, DataLayout::NCHW); ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4, DataLayout::NCHW); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_1, DepthToSpaceTest1, DataLayout::NCHW); 
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_2, DepthToSpaceTest2, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_3, DepthToSpaceTest3, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_4, DepthToSpaceTest4, DataLayout::NCHW);
+
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1, DataLayout::NCHW);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2, DataLayout::NCHW);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3, DataLayout::NCHW);
@@ -1340,6 +1481,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_2, DepthToSpaceTest2,
                      DataLayout::NHWC);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4,
                      DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_1, DepthToSpaceTest1, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_2, DepthToSpaceTest2, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_3, DepthToSpaceTest3, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_4, DepthToSpaceTest4, DataLayout::NHWC);
+
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1, DataLayout::NHWC);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2, DataLayout::NHWC);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3, DataLayout::NHWC);
@@ -1468,6 +1614,8 @@ ARMNN_AUTO_TEST_CASE(Abs2dSigned32, Abs2dTest)
 ARMNN_AUTO_TEST_CASE(Abs3dSigned32, Abs3dTest)
 ARMNN_AUTO_TEST_CASE(AbsZeroSigned32, AbsZeroTest)
+ARMNN_AUTO_TEST_CASE(Abs2dQuantisedAsymmS8, Abs2dTest)
+ARMNN_AUTO_TEST_CASE(Abs3dQuantisedAsymmS8, Abs3dTest)
 ARMNN_AUTO_TEST_CASE(Abs2dQuantisedAsymm8, Abs2dTest)
 ARMNN_AUTO_TEST_CASE(Abs3dQuantisedAsymm8, Abs3dTest)
 ARMNN_AUTO_TEST_CASE(Abs2dQuantisedSymm16, Abs2dTest)
@@ -1482,6 +1630,16 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsFloat)
 {
     DetectionPostProcessFastNmsFloatTest();
 }
+BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsInt8)
+{
+    DetectionPostProcessRegularNmsQuantizedTest<
+        RefWorkloadFactory, DataType::QAsymmS8>();
+}
+BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt8)
+{
+    DetectionPostProcessFastNmsQuantizedTest<
+        RefWorkloadFactory, DataType::QAsymmS8>();
+}
 BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsUint8)
 {
     DetectionPostProcessRegularNmsQuantizedTest<
@@ -1552,6 +1710,10 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeFloat32, SimpleTransposeTest)
 ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet2Test, TransposeValueSet2Test)
 ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet3Test, TransposeValueSet3Test)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymmS8, SimpleTransposeTest)
+ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet1Test, TransposeValueSet1Test)
+ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet2Test, TransposeValueSet2Test)
+ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet3Test, TransposeValueSet3Test)
 ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymm8, SimpleTransposeTest)
 ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet1Test, TransposeValueSet1Test)
 ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet2Test, TransposeValueSet2Test)
@@ -1570,6 +1732,14 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNhwc,
                      SimpleTransposeConvolution2dTest,
                      true,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dInt8Nchw,
+                     SimpleTransposeConvolution2dTest,
+                     true,
+                     DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dInt8Nhwc,
+                     SimpleTransposeConvolution2dTest,
+                     true,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nchw,
                      SimpleTransposeConvolution2dTest,
                      true,
@@ -1595,6 +1765,14 @@
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dFloatNhwc, SimpleTransposeConvolution2dTest, true, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dInt8Nchw, + SimpleTransposeConvolution2dTest, + true, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dInt8Nhwc, + SimpleTransposeConvolution2dTest, + true, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nchw, SimpleTransposeConvolution2dTest, true, @@ -1620,6 +1798,14 @@ ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dFloatNhwc, PaddedTransposeConvolution2dTest, true, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dInt8Nchw, + PaddedTransposeConvolution2dTest, + true, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dInt8Nhwc, + PaddedTransposeConvolution2dTest, + true, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nchw, PaddedTransposeConvolution2dTest, true, @@ -1645,6 +1831,14 @@ ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dFloatNhwc, PaddedTransposeConvolution2dTest, true, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt8Nchw, + PaddedTransposeConvolution2dTest, + true, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt8Nhwc, + PaddedTransposeConvolution2dTest, + true, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nchw, PaddedTransposeConvolution2dTest, true, @@ -1670,6 +1864,14 @@ ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dFloatNhwc, StridedTransposeConvolution2dTest, true, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dInt8Nchw, + StridedTransposeConvolution2dTest, + true, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dInt8Nhwc, + StridedTransposeConvolution2dTest, + true, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nchw, StridedTransposeConvolution2dTest, true, @@ -1695,6 +1897,14 @@ ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dFloatNhwc, StridedTransposeConvolution2dTest, true, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dInt8Nchw, + StridedTransposeConvolution2dTest, + true, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dInt8Nhwc, + StridedTransposeConvolution2dTest, + true, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nchw, StridedTransposeConvolution2dTest, true, @@ -1718,6 +1928,12 @@ ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dFloatNchw, ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dFloatNhwc, MultiChannelTransposeConvolution2dTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dInt8Nchw, + MultiChannelTransposeConvolution2dTest, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dInt8Nhwc, + MultiChannelTransposeConvolution2dTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nchw, MultiChannelTransposeConvolution2dTest, DataLayout::NCHW) @@ -1754,6 +1970,8 @@ ARMNN_AUTO_TEST_CASE(NegZero, NegZeroTest) ARMNN_AUTO_TEST_CASE(NegNegative, NegNegativeTest) ARMNN_AUTO_TEST_CASE(Neg2dFloat16, Neg2dTest) ARMNN_AUTO_TEST_CASE(Neg3dFloat16, Neg3dTest) +ARMNN_AUTO_TEST_CASE(Neg2dQuantisedAsymmS8, Neg2dTest) +ARMNN_AUTO_TEST_CASE(Neg3dQuantisedAsymmS8, Neg3dTest) ARMNN_AUTO_TEST_CASE(Neg2dQuantisedAsymm8, Neg2dTest) ARMNN_AUTO_TEST_CASE(Neg3dQuantisedAsymm8, Neg3dTest) 
 ARMNN_AUTO_TEST_CASE(Neg2dQuantisedSymm16, Neg2dTest)
diff --git a/src/backends/reference/workloads/Pad.cpp b/src/backends/reference/workloads/Pad.cpp
index ffdd469609..1b634145fc 100644
--- a/src/backends/reference/workloads/Pad.cpp
+++ b/src/backends/reference/workloads/Pad.cpp
@@ -177,6 +177,12 @@ template void Pad(const TensorInfo& inputInfo,
                   const uint8_t* inputData,
                   uint8_t* outData,
                   const float padValue);
+template void Pad(const TensorInfo& inputInfo,
+                  const TensorInfo& outputInfo,
+                  std::vector> m_PadList,
+                  const int8_t* inputData,
+                  int8_t* outData,
+                  const float padValue);
 template void Pad(const TensorInfo& inputInfo,
                   const TensorInfo& outputInfo,
                   std::vector> m_PadList,
diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp
index 777682d70c..6f82d5ffdb 100644
--- a/src/backends/reference/workloads/RefPadWorkload.cpp
+++ b/src/backends/reference/workloads/RefPadWorkload.cpp
@@ -36,6 +36,7 @@ void RefPadWorkload::Execute() const
 template class RefPadWorkload;
 template class RefPadWorkload;
 template class RefPadWorkload;
+template class RefPadWorkload;
 template class RefPadWorkload;
 template class RefPadWorkload;
diff --git a/src/backends/reference/workloads/RefPadWorkload.hpp b/src/backends/reference/workloads/RefPadWorkload.hpp
index 5134ac8bff..74dcab1967 100644
--- a/src/backends/reference/workloads/RefPadWorkload.hpp
+++ b/src/backends/reference/workloads/RefPadWorkload.hpp
@@ -31,9 +31,10 @@ public:
 };
 using RefPadBFloat16Workload = RefPadWorkload;
-using RefPadFloat32Workload = RefPadWorkload;
-using RefPadFloat16Workload = RefPadWorkload;
-using RefPadQAsymm8Workload = RefPadWorkload;
-using RefPadQSymm16Workload = RefPadWorkload;
+using RefPadFloat32Workload = RefPadWorkload;
+using RefPadFloat16Workload = RefPadWorkload;
+using RefPadQAsymmS8Workload = RefPadWorkload;
+using RefPadQAsymm8Workload = RefPadWorkload;
+using RefPadQSymm16Workload = RefPadWorkload;
 } //namespace armnn
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.cpp b/src/backends/reference/workloads/RefPermuteWorkload.cpp
index 5751ed80a3..75e9d0acf0 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.cpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.cpp
@@ -31,6 +31,7 @@ void RefPermuteWorkload::Execute() const
 template class RefPermuteWorkload;
 template class RefPermuteWorkload;
 template class RefPermuteWorkload;
+template class RefPermuteWorkload;
 template class RefPermuteWorkload;
 template class RefPermuteWorkload;
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.hpp b/src/backends/reference/workloads/RefPermuteWorkload.hpp
index a8d308e47c..b9f259a8f8 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.hpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.hpp
@@ -28,9 +28,10 @@ public:
 };
 using RefPermuteBFloat16Workload = RefPermuteWorkload;
-using RefPermuteFloat16Workload = RefPermuteWorkload;
-using RefPermuteFloat32Workload = RefPermuteWorkload;
-using RefPermuteQAsymm8Workload = RefPermuteWorkload;
-using RefPermuteQSymm16Workload = RefPermuteWorkload;
+using RefPermuteFloat16Workload = RefPermuteWorkload;
+using RefPermuteFloat32Workload = RefPermuteWorkload;
+using RefPermuteQAsymmS8Workload = RefPermuteWorkload;
+using RefPermuteQAsymm8Workload = RefPermuteWorkload;
+using RefPermuteQSymm16Workload = RefPermuteWorkload;
 } //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefTransposeWorkload.cpp b/src/backends/reference/workloads/RefTransposeWorkload.cpp
index 242668b6b1..4e027bee2e 100644
--- a/src/backends/reference/workloads/RefTransposeWorkload.cpp
+++ b/src/backends/reference/workloads/RefTransposeWorkload.cpp
@@ -30,6 +30,7 @@ void RefTransposeWorkload::Execute() const
 template class RefTransposeWorkload;
 template class RefTransposeWorkload;
 template class RefTransposeWorkload;
+template class RefTransposeWorkload;
 template class RefTransposeWorkload;
 template class RefTransposeWorkload;
diff --git a/src/backends/reference/workloads/RefTransposeWorkload.hpp b/src/backends/reference/workloads/RefTransposeWorkload.hpp
index dcfe618b75..387572aab9 100644
--- a/src/backends/reference/workloads/RefTransposeWorkload.hpp
+++ b/src/backends/reference/workloads/RefTransposeWorkload.hpp
@@ -30,6 +30,7 @@ public:
 using RefTransposeBFloat16Workload = RefTransposeWorkload;
 using RefTransposeFloat16Workload = RefTransposeWorkload;
 using RefTransposeFloat32Workload = RefTransposeWorkload;
+using RefTransposeQAsymmS8Workload = RefTransposeWorkload;
 using RefTransposeQAsymm8Workload = RefTransposeWorkload;
 using RefTransposeQSymm16Workload = RefTransposeWorkload;
-- 
cgit v1.2.1
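
Editor's note, appended after the patch trailer and not part of the change itself: the RefWorkloadFactory hunks above all follow one pattern, namely inspect the tensor's data type and construct the matching per-type workload instantiation (the new IsQAsymmS8(info) branch returns the QAsymmS8 workload). The sketch below is a minimal, self-contained C++ illustration of that dispatch under simplified assumptions: DataType, WorkloadInfo, IWorkload, PadWorkload and CreatePad here are hypothetical stand-ins, not Arm NN's real types or API; only the IsQAsymmS8 check is taken from the diff.

// Standalone sketch of per-data-type workload dispatch (hypothetical stand-in types).
#include <iostream>
#include <memory>

enum class DataType { Float32, QAsymmU8, QAsymmS8, QSymmS16 };

struct WorkloadInfo { DataType inputType; };   // stand-in for the real WorkloadInfo

struct IWorkload
{
    virtual ~IWorkload() = default;
    virtual void Execute() const = 0;
};

// One template instantiation per supported data type, in the spirit of RefPadWorkload<T>.
template <DataType DT>
struct PadWorkload : IWorkload
{
    void Execute() const override
    {
        std::cout << "Pad workload for data type " << static_cast<int>(DT) << "\n";
    }
};

// Mirrors the predicate used by the factory branches in the diff.
bool IsQAsymmS8(const WorkloadInfo& info)
{
    return info.inputType == DataType::QAsymmS8;
}

std::unique_ptr<IWorkload> CreatePad(const WorkloadInfo& info)
{
    if (IsQAsymmS8(info))
    {
        // New branch: signed asymmetric 8-bit data gets its own instantiation.
        return std::make_unique<PadWorkload<DataType::QAsymmS8>>();
    }
    // Fallback stands in for the previously supported types.
    return std::make_unique<PadWorkload<DataType::Float32>>();
}

int main()
{
    CreatePad(WorkloadInfo{DataType::QAsymmS8})->Execute(); // picks the QAsymmS8 instantiation
    CreatePad(WorkloadInfo{DataType::Float32})->Execute();  // falls through to the default
    return 0;
}

Keeping one explicit instantiation per DataType means a new quantized type can be enabled by adding a single instantiation plus a factory branch and a layer-support entry, which is essentially the shape of this patch for QAsymmS8.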