diff options
author | Sadik Armagan <sadik.armagan@arm.com> | 2020-04-17 12:45:14 +0100 |
---|---|---|
committer | TeresaARM <teresa.charlinreyes@arm.com> | 2020-04-27 14:49:42 +0000 |
commit | 303980c502c721f13d65e7087be6c0758df65044 (patch) | |
tree | f1a9ab898b3121b988b8328161eddeb6a608e73f /src/backends/reference | |
parent | 49c52a1e3be742cd7785ccc36c31cbbe495c4003 (diff) | |
download | armnn-303980c502c721f13d65e7087be6c0758df65044.tar.gz |
IVGCVSW-4668 Add TENSOR_QUANT8_ASYMM_SIGNED data type support to CpuRef operators
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I094125ba80699cc3cf5226bda6662a54e6caa988
Diffstat (limited to 'src/backends/reference')
10 files changed, 336 insertions, 62 deletions
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp index 25d639a38a..65ae14ff40 100644 --- a/src/backends/reference/RefLayerSupport.cpp +++ b/src/backends/reference/RefLayerSupport.cpp @@ -181,10 +181,11 @@ bool RefLayerSupport::IsArgMinMaxSupported(const armnn::TensorInfo &input, const { IgnoreUnused(descriptor); - std::array<DataType, 5> supportedTypes = + std::array<DataType, 6> supportedTypes = { DataType::BFloat16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16, DataType::Signed32 @@ -211,11 +212,12 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input, { IgnoreUnused(descriptor); - std::array<DataType, 5> supportedTypes = + std::array<DataType, 6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -260,11 +262,12 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input, std::string outputTensorStr = "output"; // Define supported types. 
- std::array<DataType,5> supportedTypes = + std::array<DataType,6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -302,12 +305,13 @@ bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0, Optional<std::string&> reasonIfUnsupported) const { IgnoreUnused(descriptor); - std::array<DataType, 7> supportedInputTypes = + std::array<DataType, 8> supportedInputTypes = { DataType::Boolean, DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16, DataType::Signed32 @@ -339,8 +343,8 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp DataType::BFloat16, DataType::Float32, DataType::Float16, - DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS16 }; @@ -366,11 +370,11 @@ bool RefLayerSupport::IsConstantSupported(const TensorInfo& output, { DataType::BFloat16, DataType::Float32, - DataType::Signed32, - DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS8, - DataType::QSymmS16 + DataType::QSymmS16, + DataType::Signed32 }; return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported, @@ -462,8 +466,8 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input, DataType::BFloat16, DataType::Float32, DataType::Float16, - DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS8, DataType::QSymmS16 }; @@ -495,9 +499,9 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input, ARMNN_NO_DEPRECATE_WARN_BEGIN std::array<DataType, 4> supportedWeightTypes = { + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS8, - DataType::QAsymmS8, DataType::QuantizedSymm8PerAxis // deprecated }; ARMNN_NO_DEPRECATE_WARN_END @@ -543,8 +547,8 @@ bool RefLayerSupport::IsDebugSupported(const TensorInfo& input, DataType::BFloat16, DataType::Float16, DataType::Float32, - 
DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS8, DataType::QSymmS16, DataType::Signed32 @@ -570,11 +574,12 @@ bool RefLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input, IgnoreUnused(descriptor); bool supported = true; - std::array<DataType,5> supportedTypes = + std::array<DataType,6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -598,6 +603,7 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input, const Optional<TensorInfo>& biases, Optional<std::string&> reasonIfUnsupported) const { + IgnoreUnused(descriptor); bool supported = true; // Define supported types. @@ -606,9 +612,9 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input, DataType::BFloat16, DataType::Float32, DataType::Float16, - DataType::QSymmS8, DataType::QAsymmS8, DataType::QAsymmU8, + DataType::QSymmS8, DataType::QSymmS16 }; @@ -621,21 +627,22 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input, supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, "Reference DepthwiseConvolution2d: input and output types mismatched."); - ARMNN_NO_DEPRECATE_WARN_BEGIN - std::array<DataType, 3> supportedWeightTypes = - { - DataType::QAsymmU8, - DataType::QSymmS8, - DataType::QuantizedSymm8PerAxis // deprecated - }; - ARMNN_NO_DEPRECATE_WARN_END - const DataType inputType = input.GetDataType(); if (IsQuantized8BitType(inputType)) { + ARMNN_NO_DEPRECATE_WARN_BEGIN + std::array<DataType, 4> supportedWeightTypes = + { + DataType::QAsymmS8, + DataType::QAsymmU8, + DataType::QSymmS8, + DataType::QuantizedSymm8PerAxis // deprecated + }; + ARMNN_NO_DEPRECATE_WARN_END supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported, - "Reference convolution2d: weights type not supported for quantized input."); + "Reference DepthwiseConvolution2d: weights 
type not supported for " + "quantized input."); } else { @@ -658,7 +665,6 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input, supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported, "Reference DepthwiseConvolution2d: biases is not a supported type."); } - IgnoreUnused(descriptor); return supported; @@ -716,10 +722,11 @@ bool RefLayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncod bool supported = true; - std::array<DataType,4> supportedInputTypes = + std::array<DataType,5> supportedInputTypes = { DataType::BFloat16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -750,10 +757,11 @@ bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0, { bool supported = true; - std::array<DataType,5> supportedTypes = { + std::array<DataType,6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -786,11 +794,12 @@ bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input, { IgnoreUnused(descriptor); - std::array<DataType, 6> supportedTypes = + std::array<DataType, 7> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16, DataType::Signed32 @@ -883,8 +892,8 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input, DataType::BFloat16, DataType::Float32, DataType::Float16, - DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS16 }; @@ -913,8 +922,9 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input, } ARMNN_NO_DEPRECATE_WARN_BEGIN - std::array<DataType, 3> supportedWeightTypes = + std::array<DataType, 4> supportedWeightTypes = { + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS8, DataType::QuantizedSymm8PerAxis // deprecated @@ -969,11 +979,12 @@ bool 
RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0, armnn::Optional<std::string&> reasonIfUnsupported) const { bool supported = true; - std::array<DataType,5> supportedTypes = + std::array<DataType,6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1050,11 +1061,12 @@ bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input, { IgnoreUnused(descriptor); // Define supported types - std::array<DataType, 5> supportedTypes = + std::array<DataType, 6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1261,11 +1273,12 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input, std::string meanLayerStr = "Mean"; std::string outputTensorStr = "output"; - std::array<DataType,5> supportedTypes = + std::array<DataType,6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1328,11 +1341,12 @@ bool RefLayerSupport::IsMemCopySupported(const TensorInfo &input, { bool supported = true; - std::array<DataType,6> supportedTypes = + std::array<DataType,7> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16, DataType::Boolean @@ -1357,10 +1371,11 @@ bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0, { bool supported = true; - std::array<DataType,5> supportedTypes = { + std::array<DataType,6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1397,8 +1412,8 @@ bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0, DataType::BFloat16, DataType::Float32, DataType::Float16, - DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS16 }; @@ 
-1431,11 +1446,12 @@ bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input, IgnoreUnused(descriptor); // Define supported types - std::array<DataType, 5> supportedTypes = + std::array<DataType, 6> supportedTypes = { DataType::BFloat16, DataType::Float16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1470,11 +1486,12 @@ bool RefLayerSupport::IsPadSupported(const TensorInfo& input, bool supported = true; // Define supported output and inputs types. - std::array<DataType,5> supportedTypes = + std::array<DataType,6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1500,11 +1517,12 @@ bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input, bool supported = true; // Define supported output and inputs types. - std::array<DataType, 5> supportedTypes = + std::array<DataType, 6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1574,8 +1592,8 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input, // Define supported output types. 
std::array<DataType,4> supportedOutputTypes = { - DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS8, DataType::QSymmS16 }; @@ -1616,11 +1634,12 @@ bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input, Optional<std::string&> reasonIfUnsupported) const { bool supported = true; - std::array<DataType,5> supportedTypes = + std::array<DataType,6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1649,8 +1668,8 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input, DataType::BFloat16, DataType::Float32, DataType::Float16, - DataType::QAsymmU8, DataType::QAsymmS8, + DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1684,10 +1703,11 @@ bool RefLayerSupport::IsSliceSupported(const TensorInfo& input, IgnoreUnused(descriptor); bool supported = true; - std::array<DataType, 4> supportedTypes = + std::array<DataType, 5> supportedTypes = { DataType::BFloat16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1741,11 +1761,12 @@ bool RefLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input, { IgnoreUnused(descriptor); bool supported = true; - std::array<DataType,5> supportedTypes = + std::array<DataType,6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1771,11 +1792,12 @@ bool RefLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input, IgnoreUnused(descriptor); bool supported = true; - std::array<DataType,5> supportedTypes = + std::array<DataType,6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1798,11 +1820,12 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input, { IgnoreUnused(descriptor); bool supported = true; - std::array<DataType,5> supportedTypes = + 
std::array<DataType,6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1820,11 +1843,12 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input, { IgnoreUnused(descriptor); bool supported = true; - std::array<DataType,5> supportedTypes = + std::array<DataType,6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1851,11 +1875,12 @@ bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inp IgnoreUnused(descriptor); bool supported = true; - std::array<DataType,5> supportedTypes = + std::array<DataType,6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1883,10 +1908,11 @@ bool RefLayerSupport::IsStridedSliceSupported(const TensorInfo& input, IgnoreUnused(descriptor); bool supported = true; - std::array<DataType,4> supportedTypes = + std::array<DataType,5> supportedTypes = { DataType::BFloat16, DataType::Float32, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1910,10 +1936,11 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0, { bool supported = true; - std::array<DataType,5> supportedTypes = { + std::array<DataType,6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1946,11 +1973,12 @@ bool RefLayerSupport::IsPreluSupported(const TensorInfo& input, { bool supported = true; - std::array<DataType, 5> supportedTypes + std::array<DataType, 6> supportedTypes { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; @@ -1983,12 +2011,14 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input, IgnoreUnused(descriptor); bool 
supported = true; - std::array<DataType,5> supportedTypes = + std::array<DataType,7> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, + DataType::QSymmS8, DataType::QSymmS16 }; @@ -2003,11 +2033,12 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input, const DataType inputType = input.GetDataType(); - if (inputType == DataType::QAsymmU8) + if (IsQuantized8BitType(inputType)) { ARMNN_NO_DEPRECATE_WARN_BEGIN - std::array<DataType, 3> supportedWeightTypes = + std::array<DataType, 4> supportedWeightTypes = { + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS8, DataType::QuantizedSymm8PerAxis //Deprecated @@ -2052,11 +2083,12 @@ bool RefLayerSupport::IsTransposeSupported(const TensorInfo& input, bool supported = true; // Define supported output and inputs types. - std::array<DataType, 5> supportedTypes = + std::array<DataType, 6> supportedTypes = { DataType::BFloat16, DataType::Float32, DataType::Float16, + DataType::QAsymmS8, DataType::QAsymmU8, DataType::QSymmS16 }; diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp index 5d3775a59d..4566fe5e40 100644 --- a/src/backends/reference/RefWorkloadFactory.cpp +++ b/src/backends/reference/RefWorkloadFactory.cpp @@ -468,6 +468,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescripto { return std::make_unique<RefPadBFloat16Workload>(descriptor, info); } + else if (IsQAsymmS8(info)) + { + return std::make_unique<RefPadQAsymmS8Workload>(descriptor, info); + } return MakeWorkload<RefPadFloat32Workload, RefPadQAsymm8Workload>(descriptor, info); } @@ -482,6 +486,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePermute(const PermuteQueueD { return std::make_unique<RefPermuteBFloat16Workload>(descriptor, info); } + else if (IsQAsymmS8(info)) + { + return std::make_unique<RefPermuteQAsymmS8Workload>(descriptor, info); + } return 
MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>(descriptor, info); } @@ -603,6 +611,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTranspose(const TransposeQu { return std::make_unique<RefTransposeBFloat16Workload>(descriptor, info); } + else if (IsQAsymmS8(info)) + { + return std::make_unique<RefTransposeQAsymmS8Workload>(descriptor, info); + } return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload, RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>(descriptor, info); } diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp index bcace79493..f50051aaac 100644 --- a/src/backends/reference/test/RefLayerTests.cpp +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -86,6 +86,14 @@ ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Nhwc, Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>, false, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Int8, + Convolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>, + false, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcInt8, + Convolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>, + false, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8, Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>, false, @@ -119,6 +127,14 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Nhwc, Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>, false, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Int8, + Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>, + false, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcInt8, + Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>, + false, + 
DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8, Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>, false, @@ -152,6 +168,14 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc, Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>, false, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Int8, + Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmS8, DataType::Signed32>, + false, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcInt8, + Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmS8, DataType::Signed32>, + false, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8, Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>, false, @@ -217,6 +241,14 @@ ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcBFloat16, DepthwiseConvolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>, false, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Int8, + DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>, + false, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcInt8, + DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>, + false, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Uint8, DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>, false, @@ -250,6 +282,14 @@ ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcBFloat16, DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>, false, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Int8, + DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, 
DataType::Signed32>, + false, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcInt8, + DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>, + false, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Uint8, DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>, false, @@ -743,6 +783,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearFloat16, SimpleResizeBilinearTest<DataType::Float16>, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearInt8, + SimpleResizeBilinearTest<DataType::QAsymmS8>, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8, SimpleResizeBilinearTest<DataType::QAsymmU8>, DataLayout::NCHW) @@ -755,6 +798,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ARMNN_AUTO_TEST_CASE(ResizeBilinearNopFloat16, ResizeBilinearNopTest<DataType::Float16>, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeBilinearNopInt8, + ResizeBilinearNopTest<DataType::QAsymmS8>, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8, ResizeBilinearNopTest<DataType::QAsymmU8>, DataLayout::NCHW) @@ -767,6 +813,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinFloat16, ResizeBilinearSqMinTest<DataType::Float16>, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinInt8, + ResizeBilinearSqMinTest<DataType::QAsymmS8>, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8, ResizeBilinearSqMinTest<DataType::QAsymmU8>, DataLayout::NCHW) @@ -779,6 +828,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ARMNN_AUTO_TEST_CASE(ResizeBilinearMinFloat16, ResizeBilinearMinTest<DataType::Float16>, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMinInt8, + ResizeBilinearMinTest<DataType::QAsymmS8>, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8, ResizeBilinearMinTest<DataType::QAsymmU8>, DataLayout::NCHW) @@ -791,6 +843,9 @@ 
ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ARMNN_AUTO_TEST_CASE(ResizeBilinearMagFloat16, ResizeBilinearMagTest<DataType::Float16>, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMagInt8, + ResizeBilinearMagTest<DataType::QAsymmS8>, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8, ResizeBilinearMagTest<DataType::QAsymmU8>, DataLayout::NCHW) @@ -805,6 +860,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwcFloat16, ResizeBilinearNopTest<DataType::Float16>, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeBilinearNopInt8Nhwc, + ResizeBilinearNopTest<DataType::QAsymmS8>, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc, ResizeBilinearNopTest<DataType::QAsymmU8>, DataLayout::NHWC) @@ -817,6 +875,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwcFloat16, SimpleResizeBilinearTest<DataType::Float16>, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearInt8Nhwc, + SimpleResizeBilinearTest<DataType::QAsymmS8>, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc, SimpleResizeBilinearTest<DataType::QAsymmU8>, DataLayout::NHWC) @@ -829,6 +890,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwcFloat16, ResizeBilinearSqMinTest<DataType::Float16>, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinInt8Nhwc, + ResizeBilinearSqMinTest<DataType::QAsymmS8>, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc, ResizeBilinearSqMinTest<DataType::QAsymmU8>, DataLayout::NHWC) @@ -841,6 +905,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwcFloat16, ResizeBilinearMinTest<DataType::Float16>, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMinInt8Nhwc, + ResizeBilinearMinTest<DataType::QAsymmS8>, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc, ResizeBilinearMinTest<DataType::QAsymmU8>, DataLayout::NHWC) @@ 
-853,6 +920,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwcFloat16, ResizeBilinearMagTest<DataType::Float16>, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMagInt8Nhwc, + ResizeBilinearMagTest<DataType::QAsymmS8>, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc, ResizeBilinearMagTest<DataType::QAsymmU8>, DataLayout::NHWC) @@ -864,6 +934,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint16Nhwc, ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor, SimpleResizeNearestNeighborTest<DataType::Float32>, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborInt8, + SimpleResizeNearestNeighborTest<DataType::QAsymmS8>, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8, SimpleResizeNearestNeighborTest<DataType::QAsymmU8>, DataLayout::NCHW) @@ -873,6 +946,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop, ResizeNearestNeighborNopTest<DataType::Float32>, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopInt8, + ResizeNearestNeighborNopTest<DataType::QAsymmS8>, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8, ResizeNearestNeighborNopTest<DataType::QAsymmU8>, DataLayout::NCHW) @@ -882,6 +958,9 @@ ARMNN_AUTO_TEST_CASE(esizeNearestNeighborNopUint16, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin, ResizeNearestNeighborSqMinTest<DataType::Float32>, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinInt8, + ResizeNearestNeighborSqMinTest<DataType::QAsymmS8>, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8, ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>, DataLayout::NCHW) @@ -891,6 +970,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin, ResizeNearestNeighborMinTest<DataType::Float32>, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinInt8, + 
ResizeNearestNeighborMinTest<DataType::QAsymmS8>, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8, ResizeNearestNeighborMinTest<DataType::QAsymmU8>, DataLayout::NCHW) @@ -900,6 +982,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag, ResizeNearestNeighborMagTest<DataType::Float32>, DataLayout::NCHW, 0.10f, 50, 0.11f, 20) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagInt8, + ResizeNearestNeighborMagTest<DataType::QAsymmS8>, + DataLayout::NCHW, 0.10f, 50, 0.11f, 20) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8, ResizeNearestNeighborMagTest<DataType::QAsymmU8>, DataLayout::NCHW, 0.10f, 50, 0.11f, 20) @@ -911,6 +996,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc, ResizeNearestNeighborNopTest<DataType::Float32>, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopInt8Nhwc, + ResizeNearestNeighborNopTest<DataType::QAsymmS8>, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc, ResizeNearestNeighborNopTest<DataType::QAsymmU8>, DataLayout::NHWC) @@ -920,6 +1008,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint16Nhwc, ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc, SimpleResizeNearestNeighborTest<DataType::Float32>, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborInt8Nhwc, + SimpleResizeNearestNeighborTest<DataType::QAsymmS8>, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc, SimpleResizeNearestNeighborTest<DataType::QAsymmU8>, DataLayout::NHWC) @@ -929,6 +1020,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16Nhwc, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc, ResizeNearestNeighborSqMinTest<DataType::Float32>, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinInt8Nhwc, + ResizeNearestNeighborSqMinTest<DataType::QAsymmS8>, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc, 
ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>, DataLayout::NHWC) @@ -938,6 +1032,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16Nhwc, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc, ResizeNearestNeighborMinTest<DataType::Float32>, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinInt8Nhwc, + ResizeNearestNeighborMinTest<DataType::QAsymmS8>, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc, ResizeNearestNeighborMinTest<DataType::QAsymmU8>, DataLayout::NHWC) @@ -947,6 +1044,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16Nhwc, ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc, ResizeNearestNeighborMagTest<DataType::Float32>, DataLayout::NHWC, 0.10f, 50, 0.11f, 20) +ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagInt8Nhwc, + ResizeNearestNeighborMagTest<DataType::QAsymmS8>, + DataLayout::NHWC, 0.10f, 50, 0.11f, 20) ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc, ResizeNearestNeighborMagTest<DataType::QAsymmU8>, DataLayout::NHWC, 0.10f, 50, 0.11f, 20) @@ -1083,6 +1183,7 @@ ARMNN_AUTO_TEST_CASE(SimpleFloorQuantisedSymm16, SimpleFloorTest<DataType::QSymm // Reshape ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<DataType::Float32>) +ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymmS8, SimpleReshapeTest<DataType::QAsymmS8>) ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymm8, SimpleReshapeTest<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedSymm16, SimpleReshapeTest<DataType::QSymmS16>) ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest<DataType::Float32>) @@ -1094,6 +1195,8 @@ ARMNN_AUTO_TEST_CASE(RsqrtZero, RsqrtZeroTest<DataType::Float32>) ARMNN_AUTO_TEST_CASE(RsqrtNegative, RsqrtNegativeTest<DataType::Float32>) ARMNN_AUTO_TEST_CASE(Rsqrt2dFloat16, Rsqrt2dTest<DataType::Float16>) ARMNN_AUTO_TEST_CASE(Rsqrt3dFloat16, Rsqrt3dTest<DataType::Float16>) +ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedAsymmS8, Rsqrt2dTest<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedAsymmS8, 
Rsqrt3dTest<DataType::QAsymmS8>) ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedAsymm8, Rsqrt2dTest<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedAsymm8, Rsqrt3dTest<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedSymm16, Rsqrt2dTest<DataType::QSymmS16>) @@ -1108,6 +1211,10 @@ ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>) ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>) ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>) ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test<DataType::Float32>) +ARMNN_AUTO_TEST_CASE(SimplePermuteQASymS8, SimplePermuteTest<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmS8>) ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmU8>) @@ -1168,6 +1275,14 @@ ARMNN_AUTO_TEST_CASE(MeanVts1Float32, MeanVts1Test<DataType::Float32>) ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test<DataType::Float32>) ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test<DataType::Float32>) +ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymmS8, MeanSimpleTest<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymmS8, MeanSimpleAxisTest<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymmS8, MeanKeepDimsTest<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymmS8, MeanMultipleDimsTest<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymmS8, MeanVts1Test<DataType::QAsymmS8>) 
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymmS8, MeanVts2Test<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymmS8, MeanVts3Test<DataType::QAsymmS8>) + ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QAsymmU8>) @@ -1201,6 +1316,11 @@ ARMNN_AUTO_TEST_CASE(ArgMaxChannelSigned32, ArgMaxChannelTest<DataType::Signed32 ARMNN_AUTO_TEST_CASE(ArgMaxHeightSigned32, ArgMaxHeightTest<DataType::Signed32>) ARMNN_AUTO_TEST_CASE(ArgMinWidthSigned32, ArgMinWidthTest<DataType::Signed32>) +ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedAsymmS8, ArgMaxSimpleTest<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedAsymmS8, ArgMinSimpleTest<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedAsymmS8, ArgMinChannelTest<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(ArgMaxChannelQuantisedAsymmS8, ArgMaxChannelTest<DataType::QAsymmS8>) + ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedAsymm8, ArgMaxSimpleTest<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedAsymm8, ArgMinSimpleTest<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedAsymm8, ArgMinChannelTest<DataType::QAsymmU8>) @@ -1269,6 +1389,14 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_5, BatchToSpaceNdNhwcTest5<DataTy ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_6, BatchToSpaceNdNhwcTest6<DataType::Float16>) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_7, BatchToSpaceNdNhwcTest7<DataType::Float16>) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt1, BatchToSpaceNdNhwcTest1<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt2, BatchToSpaceNdNhwcTest2<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt3, BatchToSpaceNdNhwcTest3<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt4, BatchToSpaceNdNhwcTest4<DataType::QAsymmS8>) 
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt5, BatchToSpaceNdNhwcTest5<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt6, BatchToSpaceNdNhwcTest6<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt7, BatchToSpaceNdNhwcTest7<DataType::QAsymmS8>) + ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QAsymmU8>) @@ -1293,6 +1421,14 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_5, BatchToSpaceNdNchwTest5<DataTy ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_6, BatchToSpaceNdNchwTest6<DataType::Float16>) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_7, BatchToSpaceNdNchwTest7<DataType::Float16>) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt1, BatchToSpaceNdNchwTest1<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt2, BatchToSpaceNdNchwTest2<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt3, BatchToSpaceNdNchwTest3<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt4, BatchToSpaceNdNchwTest4<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt5, BatchToSpaceNdNchwTest5<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt6, BatchToSpaceNdNchwTest6<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt7, BatchToSpaceNdNchwTest7<DataType::QAsymmS8>) + ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3<DataType::QAsymmU8>) @@ -1320,6 +1456,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_2, DepthToSpaceTest2<DataType::Floa ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW); 
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_1, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NCHW); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_2, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NCHW); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_3, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NCHW); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_4, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NCHW); + ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW); ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW); ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW); @@ -1340,6 +1481,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_2, DepthToSpaceTest2<DataType::Floa ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC); ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_1, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NHWC); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_2, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NHWC); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_3, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NHWC); +ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_4, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NHWC); + ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC); ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC); ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC); @@ -1468,6 +1614,8 @@ ARMNN_AUTO_TEST_CASE(Abs2dSigned32, Abs2dTest<DataType::Signed32>) 
ARMNN_AUTO_TEST_CASE(Abs3dSigned32, Abs3dTest<DataType::Signed32>) ARMNN_AUTO_TEST_CASE(AbsZeroSigned32, AbsZeroTest<DataType::Signed32>) +ARMNN_AUTO_TEST_CASE(Abs2dQuantisedAsymmS8, Abs2dTest<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(Abs3dQuantisedAsymmS8, Abs3dTest<DataType::QAsymmS8>) ARMNN_AUTO_TEST_CASE(Abs2dQuantisedAsymm8, Abs2dTest<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(Abs3dQuantisedAsymm8, Abs3dTest<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(Abs2dQuantisedSymm16, Abs2dTest<DataType::QSymmS16>) @@ -1482,6 +1630,16 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsFloat) { DetectionPostProcessFastNmsFloatTest<RefWorkloadFactory>(); } +BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsInt8) +{ + DetectionPostProcessRegularNmsQuantizedTest< + RefWorkloadFactory, DataType::QAsymmS8>(); +} +BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt8) +{ + DetectionPostProcessFastNmsQuantizedTest< + RefWorkloadFactory, DataType::QAsymmS8>(); +} BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsUint8) { DetectionPostProcessRegularNmsQuantizedTest< @@ -1552,6 +1710,10 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeFloat32, SimpleTransposeTest<DataType::Float ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet1Test, TransposeValueSet1Test<DataType::Float32>) ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet2Test, TransposeValueSet2Test<DataType::Float32>) ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet3Test, TransposeValueSet3Test<DataType::Float32>) +ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymmS8, SimpleTransposeTest<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet1Test, TransposeValueSet1Test<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet3Test, TransposeValueSet3Test<DataType::QAsymmS8>) ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymm8, SimpleTransposeTest<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet1Test, 
TransposeValueSet1Test<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmU8>) @@ -1570,6 +1732,14 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNhwc, SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>, true, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dInt8Nchw, + SimpleTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>, + true, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dInt8Nhwc, + SimpleTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>, + true, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nchw, SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>, true, @@ -1595,6 +1765,14 @@ ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dFloatNhwc, SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>, true, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dInt8Nchw, + SimpleTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>, + true, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dInt8Nhwc, + SimpleTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>, + true, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nchw, SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>, true, @@ -1620,6 +1798,14 @@ ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dFloatNhwc, PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>, true, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dInt8Nchw, + PaddedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>, + true, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dInt8Nhwc, + PaddedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>, + true, + DataLayout::NHWC) 
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nchw, PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>, true, @@ -1645,6 +1831,14 @@ ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dFloatNhwc, PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>, true, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt8Nchw, + PaddedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>, + true, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt8Nhwc, + PaddedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>, + true, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nchw, PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>, true, @@ -1670,6 +1864,14 @@ ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dFloatNhwc, StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>, true, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dInt8Nchw, + StridedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>, + true, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dInt8Nhwc, + StridedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>, + true, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nchw, StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>, true, @@ -1695,6 +1897,14 @@ ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dFloatNhwc, StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>, true, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dInt8Nchw, + StridedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>, + true, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dInt8Nhwc, + StridedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>, + true, + 
DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nchw, StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>, true, @@ -1718,6 +1928,12 @@ ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dFloatNchw, ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dFloatNhwc, MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dInt8Nchw, + MultiChannelTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>, + DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dInt8Nhwc, + MultiChannelTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nchw, MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>, DataLayout::NCHW) @@ -1754,6 +1970,8 @@ ARMNN_AUTO_TEST_CASE(NegZero, NegZeroTest<DataType::Float32>) ARMNN_AUTO_TEST_CASE(NegNegative, NegNegativeTest<DataType::Float32>) ARMNN_AUTO_TEST_CASE(Neg2dFloat16, Neg2dTest<DataType::Float16>) ARMNN_AUTO_TEST_CASE(Neg3dFloat16, Neg3dTest<DataType::Float16>) +ARMNN_AUTO_TEST_CASE(Neg2dQuantisedAsymmS8, Neg2dTest<DataType::QAsymmS8>) +ARMNN_AUTO_TEST_CASE(Neg3dQuantisedAsymmS8, Neg3dTest<DataType::QAsymmS8>) ARMNN_AUTO_TEST_CASE(Neg2dQuantisedAsymm8, Neg2dTest<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(Neg3dQuantisedAsymm8, Neg3dTest<DataType::QAsymmU8>) ARMNN_AUTO_TEST_CASE(Neg2dQuantisedSymm16, Neg2dTest<DataType::QSymmS16>) diff --git a/src/backends/reference/workloads/Pad.cpp b/src/backends/reference/workloads/Pad.cpp index ffdd469609..1b634145fc 100644 --- a/src/backends/reference/workloads/Pad.cpp +++ b/src/backends/reference/workloads/Pad.cpp @@ -177,6 +177,12 @@ template void Pad<uint8_t>(const TensorInfo& inputInfo, const uint8_t* inputData, uint8_t* outData, const float padValue); +template void Pad<int8_t>(const TensorInfo& 
inputInfo, + const TensorInfo& outputInfo, + std::vector<std::pair<unsigned int, unsigned int>> m_PadList, + const int8_t* inputData, + int8_t* outData, + const float padValue); template void Pad<int16_t>(const TensorInfo& inputInfo, const TensorInfo& outputInfo, std::vector<std::pair<unsigned int, unsigned int>> m_PadList, diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp index 777682d70c..6f82d5ffdb 100644 --- a/src/backends/reference/workloads/RefPadWorkload.cpp +++ b/src/backends/reference/workloads/RefPadWorkload.cpp @@ -36,6 +36,7 @@ void RefPadWorkload<DataType>::Execute() const template class RefPadWorkload<DataType::BFloat16>; template class RefPadWorkload<DataType::Float32>; template class RefPadWorkload<DataType::Float16>; +template class RefPadWorkload<DataType::QAsymmS8>; template class RefPadWorkload<DataType::QAsymmU8>; template class RefPadWorkload<DataType::QSymmS16>; diff --git a/src/backends/reference/workloads/RefPadWorkload.hpp b/src/backends/reference/workloads/RefPadWorkload.hpp index 5134ac8bff..74dcab1967 100644 --- a/src/backends/reference/workloads/RefPadWorkload.hpp +++ b/src/backends/reference/workloads/RefPadWorkload.hpp @@ -31,9 +31,10 @@ public: }; using RefPadBFloat16Workload = RefPadWorkload<DataType::BFloat16>; -using RefPadFloat32Workload = RefPadWorkload<DataType::Float32>; -using RefPadFloat16Workload = RefPadWorkload<DataType::Float16>; -using RefPadQAsymm8Workload = RefPadWorkload<DataType::QAsymmU8>; -using RefPadQSymm16Workload = RefPadWorkload<DataType::QSymmS16>; +using RefPadFloat32Workload = RefPadWorkload<DataType::Float32>; +using RefPadFloat16Workload = RefPadWorkload<DataType::Float16>; +using RefPadQAsymmS8Workload = RefPadWorkload<DataType::QAsymmS8>; +using RefPadQAsymm8Workload = RefPadWorkload<DataType::QAsymmU8>; +using RefPadQSymm16Workload = RefPadWorkload<DataType::QSymmS16>; } //namespace armnn diff --git 
a/src/backends/reference/workloads/RefPermuteWorkload.cpp b/src/backends/reference/workloads/RefPermuteWorkload.cpp index 5751ed80a3..75e9d0acf0 100644 --- a/src/backends/reference/workloads/RefPermuteWorkload.cpp +++ b/src/backends/reference/workloads/RefPermuteWorkload.cpp @@ -31,6 +31,7 @@ void RefPermuteWorkload<DataType>::Execute() const template class RefPermuteWorkload<DataType::BFloat16>; template class RefPermuteWorkload<DataType::Float16>; template class RefPermuteWorkload<DataType::Float32>; +template class RefPermuteWorkload<DataType::QAsymmS8>; template class RefPermuteWorkload<DataType::QAsymmU8>; template class RefPermuteWorkload<DataType::QSymmS16>; diff --git a/src/backends/reference/workloads/RefPermuteWorkload.hpp b/src/backends/reference/workloads/RefPermuteWorkload.hpp index a8d308e47c..b9f259a8f8 100644 --- a/src/backends/reference/workloads/RefPermuteWorkload.hpp +++ b/src/backends/reference/workloads/RefPermuteWorkload.hpp @@ -28,9 +28,10 @@ public: }; using RefPermuteBFloat16Workload = RefPermuteWorkload<DataType::BFloat16>; -using RefPermuteFloat16Workload = RefPermuteWorkload<DataType::Float16>; -using RefPermuteFloat32Workload = RefPermuteWorkload<DataType::Float32>; -using RefPermuteQAsymm8Workload = RefPermuteWorkload<DataType::QAsymmU8>; -using RefPermuteQSymm16Workload = RefPermuteWorkload<DataType::QSymmS16>; +using RefPermuteFloat16Workload = RefPermuteWorkload<DataType::Float16>; +using RefPermuteFloat32Workload = RefPermuteWorkload<DataType::Float32>; +using RefPermuteQAsymmS8Workload = RefPermuteWorkload<DataType::QAsymmS8>; +using RefPermuteQAsymm8Workload = RefPermuteWorkload<DataType::QAsymmU8>; +using RefPermuteQSymm16Workload = RefPermuteWorkload<DataType::QSymmS16>; } //namespace armnn
\ No newline at end of file diff --git a/src/backends/reference/workloads/RefTransposeWorkload.cpp b/src/backends/reference/workloads/RefTransposeWorkload.cpp index 242668b6b1..4e027bee2e 100644 --- a/src/backends/reference/workloads/RefTransposeWorkload.cpp +++ b/src/backends/reference/workloads/RefTransposeWorkload.cpp @@ -30,6 +30,7 @@ void RefTransposeWorkload<DataType>::Execute() const template class RefTransposeWorkload<DataType::BFloat16>; template class RefTransposeWorkload<DataType::Float16>; template class RefTransposeWorkload<DataType::Float32>; +template class RefTransposeWorkload<DataType::QAsymmS8>; template class RefTransposeWorkload<DataType::QAsymmU8>; template class RefTransposeWorkload<DataType::QSymmS16>; diff --git a/src/backends/reference/workloads/RefTransposeWorkload.hpp b/src/backends/reference/workloads/RefTransposeWorkload.hpp index dcfe618b75..387572aab9 100644 --- a/src/backends/reference/workloads/RefTransposeWorkload.hpp +++ b/src/backends/reference/workloads/RefTransposeWorkload.hpp @@ -30,6 +30,7 @@ public: using RefTransposeBFloat16Workload = RefTransposeWorkload<DataType::BFloat16>; using RefTransposeFloat16Workload = RefTransposeWorkload<DataType::Float16>; using RefTransposeFloat32Workload = RefTransposeWorkload<DataType::Float32>; +using RefTransposeQAsymmS8Workload = RefTransposeWorkload<DataType::QAsymmS8>; using RefTransposeQAsymm8Workload = RefTransposeWorkload<DataType::QAsymmU8>; using RefTransposeQSymm16Workload = RefTransposeWorkload<DataType::QSymmS16>; |