From 303980c502c721f13d65e7087be6c0758df65044 Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Fri, 17 Apr 2020 12:45:14 +0100
Subject: IVGCVSW-4668 Add TENSOR_QUANT8_ASYMM_SIGNED data type support to
 CpuRef operators

Signed-off-by: Teresa Charlin
Signed-off-by: Sadik Armagan
Change-Id: I094125ba80699cc3cf5226bda6662a54e6caa988
---
 src/backends/reference/RefLayerSupport.cpp       | 140 ++++++++-----
 src/backends/reference/RefWorkloadFactory.cpp    |  12 ++
 src/backends/reference/test/RefLayerTests.cpp    | 218 +++++++++++++++++++++
 src/backends/reference/workloads/Pad.cpp         |   6 +
 .../reference/workloads/RefPadWorkload.cpp       |   1 +
 .../reference/workloads/RefPadWorkload.hpp       |   9 +-
 .../reference/workloads/RefPermuteWorkload.cpp   |   1 +
 .../reference/workloads/RefPermuteWorkload.hpp   |   9 +-
 .../reference/workloads/RefTransposeWorkload.cpp |   1 +
 .../reference/workloads/RefTransposeWorkload.hpp |   1 +
 10 files changed, 336 insertions(+), 62 deletions(-)

diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 25d639a38a..65ae14ff40 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -181,10 +181,11 @@ bool RefLayerSupport::IsArgMinMaxSupported(const armnn::TensorInfo &input, const
 {
     IgnoreUnused(descriptor);
 
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16,
         DataType::Signed32
     };
@@ -211,11 +212,12 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
 {
     IgnoreUnused(descriptor);
 
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -260,11 +262,12 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
     std::string outputTensorStr = "output";
 
     // Define supported types.
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -302,12 +305,13 @@ bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0,
                                             Optional<std::string&> reasonIfUnsupported) const
 {
     IgnoreUnused(descriptor);
-    std::array<DataType, 7> supportedInputTypes =
+    std::array<DataType, 8> supportedInputTypes =
     {
         DataType::Boolean,
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16,
         DataType::Signed32
     };
@@ -339,8 +343,8 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -366,11 +370,11 @@ bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
     {
         DataType::BFloat16,
         DataType::Float32,
-        DataType::Signed32,
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS8,
-        DataType::QSymmS16
+        DataType::QSymmS16,
+        DataType::Signed32
     };
 
     return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
@@ -462,8 +466,8 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS8,
         DataType::QSymmS16
     };
@@ -495,9 +499,9 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
         ARMNN_NO_DEPRECATE_WARN_BEGIN
         std::array<DataType, 4> supportedWeightTypes =
         {
+            DataType::QAsymmS8,
             DataType::QAsymmU8,
             DataType::QSymmS8,
-            DataType::QAsymmS8,
             DataType::QuantizedSymm8PerAxis // deprecated
         };
         ARMNN_NO_DEPRECATE_WARN_END
@@ -543,8 +547,8 @@ bool RefLayerSupport::IsDebugSupported(const TensorInfo& input,
         DataType::BFloat16,
         DataType::Float16,
         DataType::Float32,
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS8,
         DataType::QSymmS16,
         DataType::Signed32
@@ -570,11 +574,12 @@ bool RefLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
     IgnoreUnused(descriptor);
     bool supported = true;
 
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -598,6 +603,7 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
 {
+    IgnoreUnused(descriptor);
     bool supported = true;
 
     // Define supported types.
@@ -606,9 +612,9 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
-        DataType::QSymmS8,
         DataType::QAsymmS8,
         DataType::QAsymmU8,
+        DataType::QSymmS8,
         DataType::QSymmS16
     };
@@ -621,21 +627,22 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
     supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                   "Reference DepthwiseConvolution2d: input and output types mismatched.");
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::array<DataType, 3> supportedWeightTypes =
-    {
-        DataType::QAsymmU8,
-        DataType::QSymmS8,
-        DataType::QuantizedSymm8PerAxis // deprecated
-    };
-    ARMNN_NO_DEPRECATE_WARN_END
-
     const DataType inputType = input.GetDataType();
     if (IsQuantized8BitType(inputType))
     {
+        ARMNN_NO_DEPRECATE_WARN_BEGIN
+        std::array<DataType, 4> supportedWeightTypes =
+        {
+            DataType::QAsymmS8,
+            DataType::QAsymmU8,
+            DataType::QSymmS8,
+            DataType::QuantizedSymm8PerAxis // deprecated
+        };
+        ARMNN_NO_DEPRECATE_WARN_END
+
         supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
-                                      "Reference convolution2d: weights type not supported for quantized input.");
+                                      "Reference DepthwiseConvolution2d: weights type not supported for "
+                                      "quantized input.");
     }
     else
     {
@@ -658,7 +665,6 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
         supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                       "Reference DepthwiseConvolution2d: biases is not a supported type.");
     }
-    IgnoreUnused(descriptor);
 
     return supported;
 
@@ -716,10 +722,11 @@ bool RefLayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncod
     bool supported = true;
 
-    std::array<DataType,4> supportedInputTypes =
+    std::array<DataType,5> supportedInputTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -750,10 +757,11 @@ bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
 {
     bool supported = true;
 
-    std::array<DataType, 5> supportedTypes = {
+    std::array<DataType, 6> supportedTypes = {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -786,11 +794,12 @@ bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
 {
     IgnoreUnused(descriptor);
 
-    std::array<DataType, 6> supportedTypes =
+    std::array<DataType, 7> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16,
         DataType::Signed32
     };
@@ -883,8 +892,8 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -913,8 +922,9 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
     }
 
     ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::array<DataType, 3> supportedWeightTypes =
+    std::array<DataType, 4> supportedWeightTypes =
     {
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS8,
         DataType::QuantizedSymm8PerAxis // deprecated
@@ -969,11 +979,12 @@ bool RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0,
                                         armnn::Optional<std::string&> reasonIfUnsupported) const
 {
     bool supported = true;
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1050,11 +1061,12 @@ bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
 {
     IgnoreUnused(descriptor);
     // Define supported types
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1261,11 +1273,12 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
     std::string meanLayerStr = "Mean";
     std::string outputTensorStr = "output";
 
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1328,11 +1341,12 @@ bool RefLayerSupport::IsMemCopySupported(const TensorInfo &input,
 {
     bool supported = true;
 
-    std::array<DataType, 6> supportedTypes =
+    std::array<DataType, 7> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16,
         DataType::Boolean
     };
@@ -1357,10 +1371,11 @@ bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0,
 {
     bool supported = true;
 
-    std::array<DataType, 5> supportedTypes = {
+    std::array<DataType, 6> supportedTypes = {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1397,8 +1412,8 @@ bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1431,11 +1446,12 @@ bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
     IgnoreUnused(descriptor);
 
     // Define supported types
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
    {
         DataType::BFloat16,
         DataType::Float16,
         DataType::Float32,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1470,11 +1486,12 @@ bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
     bool supported = true;
 
     // Define supported output and inputs types.
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1500,11 +1517,12 @@ bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
     bool supported = true;
 
     // Define supported output and inputs types.
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1574,8 +1592,8 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
     // Define supported output types.
     std::array<DataType, 4> supportedOutputTypes = {
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS8,
         DataType::QSymmS16
     };
@@ -1616,11 +1634,12 @@ bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
     bool supported = true;
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1649,8 +1668,8 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1684,10 +1703,11 @@ bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
     IgnoreUnused(descriptor);
     bool supported = true;
 
-    std::array<DataType, 4> supportedTypes =
+    std::array<DataType, 5> supportedTypes =
    {
         DataType::BFloat16,
         DataType::Float32,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1741,11 +1761,12 @@ bool RefLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
 {
     IgnoreUnused(descriptor);
     bool supported = true;
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1771,11 +1792,12 @@ bool RefLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
     IgnoreUnused(descriptor);
     bool supported = true;
 
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1798,11 +1820,12 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
 {
     IgnoreUnused(descriptor);
     bool supported = true;
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1820,11 +1843,12 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
 {
     IgnoreUnused(descriptor);
     bool supported = true;
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1851,11 +1875,12 @@ bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inp
     IgnoreUnused(descriptor);
     bool supported = true;
 
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1883,10 +1908,11 @@ bool RefLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
     IgnoreUnused(descriptor);
     bool supported = true;
 
-    std::array<DataType, 4> supportedTypes =
+    std::array<DataType, 5> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1910,10 +1936,11 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
 {
     bool supported = true;
 
-    std::array<DataType, 5> supportedTypes = {
+    std::array<DataType, 6> supportedTypes = {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1946,11 +1973,12 @@ bool RefLayerSupport::IsPreluSupported(const TensorInfo& input,
 {
     bool supported = true;
 
-    std::array<DataType, 5> supportedTypes
+    std::array<DataType, 6> supportedTypes
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1983,12 +2011,14 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
     IgnoreUnused(descriptor);
     bool supported = true;
 
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 7> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
+        DataType::QSymmS8,
         DataType::QSymmS16
     };
@@ -2003,11 +2033,12 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
 
     const DataType inputType = input.GetDataType();
-    if (inputType == DataType::QAsymmU8)
+    if (IsQuantized8BitType(inputType))
     {
         ARMNN_NO_DEPRECATE_WARN_BEGIN
-        std::array<DataType, 3> supportedWeightTypes =
+        std::array<DataType, 4> supportedWeightTypes =
         {
+            DataType::QAsymmS8,
             DataType::QAsymmU8,
             DataType::QSymmS8,
             DataType::QuantizedSymm8PerAxis //Deprecated
@@ -2052,11 +2083,12 @@ bool RefLayerSupport::IsTransposeSupported(const TensorInfo& input,
     bool supported = true;
 
     // Define supported output and inputs types.
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 5d3775a59d..4566fe5e40 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -468,6 +468,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescripto
     {
         return std::make_unique<RefPadBFloat16Workload>(descriptor, info);
     }
+    else if (IsQAsymmS8(info))
+    {
+        return std::make_unique<RefPadQAsymmS8Workload>(descriptor, info);
+    }
     return MakeWorkload<RefPadFloat32Workload, RefPadQAsymm8Workload>(descriptor, info);
 }
 
@@ -482,6 +486,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePermute(const PermuteQueueD
     {
         return std::make_unique<RefPermuteBFloat16Workload>(descriptor, info);
     }
+    else if (IsQAsymmS8(info))
+    {
+        return std::make_unique<RefPermuteQAsymmS8Workload>(descriptor, info);
+    }
     return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
                               NullWorkload, NullWorkload>(descriptor, info);
 }
@@ -603,6 +611,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTranspose(const TransposeQu
     {
         return std::make_unique<RefTransposeBFloat16Workload>(descriptor, info);
     }
+    else if (IsQAsymmS8(info))
+    {
+        return std::make_unique<RefTransposeQAsymmS8Workload>(descriptor, info);
+    }
     return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload, RefTransposeQAsymm8Workload,
                               NullWorkload, NullWorkload>(descriptor, info);
 }
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index bcace79493..f50051aaac 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -86,6 +86,14 @@ ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Nhwc,
                      Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
                      false,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Int8,
+                     Convolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+                     false,
+                     DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcInt8,
+                     Convolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+                     false,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8,
                      Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
@@ -119,6 +127,14 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Nhwc,
                      Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
                      false,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Int8,
+                     Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+                     false,
+                     DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcInt8,
+                     Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+                     false,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8,
                      Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
@@ -152,6 +168,14 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
                      Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>,
                      false,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Int8,
+                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+                     false,
+                     DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcInt8,
+                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+                     false,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
                      Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
@@ -217,6 +241,14 @@ ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcBFloat16,
                      DepthwiseConvolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
                      false,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Int8,
+                     DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+                     false,
+                     DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcInt8,
+                     DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+                     false,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Uint8,
                      DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
@@ -250,6 +282,14 @@ ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcBFloat16,
                      DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
                      false,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Int8,
+                     DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+                     false,
+                     DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcInt8,
+                     DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
+                     false,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Uint8,
                      DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
@@ -743,6 +783,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear,
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearFloat16,
                      SimpleResizeBilinearTest<DataType::Float16>,
                      DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearInt8,
+                     SimpleResizeBilinearTest<DataType::QAsymmS8>,
+                     DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
                      SimpleResizeBilinearTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
@@ -755,6 +798,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearNop,
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNopFloat16,
                      ResizeBilinearNopTest<DataType::Float16>,
                      DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopInt8,
+                     ResizeBilinearNopTest<DataType::QAsymmS8>,
+                     DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
                      ResizeBilinearNopTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
@@ -767,6 +813,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin,
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinFloat16,
                      ResizeBilinearSqMinTest<DataType::Float16>,
                      DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinInt8,
+                     ResizeBilinearSqMinTest<DataType::QAsymmS8>,
+                     DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
                      ResizeBilinearSqMinTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
@@ -779,6 +828,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMin,
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMinFloat16,
                      ResizeBilinearMinTest<DataType::Float16>,
                      DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinInt8,
+                     ResizeBilinearMinTest<DataType::QAsymmS8>,
+                     DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
                      ResizeBilinearMinTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
@@ -791,6 +843,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMag,
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMagFloat16,
                      ResizeBilinearMagTest<DataType::Float16>,
                      DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagInt8,
+                     ResizeBilinearMagTest<DataType::QAsymmS8>,
+                     DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8,
                      ResizeBilinearMagTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
@@ -805,6 +860,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc,
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwcFloat16,
                      ResizeBilinearNopTest<DataType::Float16>,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopInt8Nhwc,
+                     ResizeBilinearNopTest<DataType::QAsymmS8>,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
                      ResizeBilinearNopTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
@@ -817,6 +875,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwcFloat16,
                      SimpleResizeBilinearTest<DataType::Float16>,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearInt8Nhwc,
+                     SimpleResizeBilinearTest<DataType::QAsymmS8>,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
                      SimpleResizeBilinearTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
@@ -829,6 +890,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwcFloat16,
                      ResizeBilinearSqMinTest<DataType::Float16>,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinInt8Nhwc,
+                     ResizeBilinearSqMinTest<DataType::QAsymmS8>,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
                      ResizeBilinearSqMinTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
@@ -841,6 +905,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwcFloat16,
                      ResizeBilinearMinTest<DataType::Float16>,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinInt8Nhwc,
+                     ResizeBilinearMinTest<DataType::QAsymmS8>,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
                      ResizeBilinearMinTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
@@ -853,6 +920,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc,
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwcFloat16,
                      ResizeBilinearMagTest<DataType::Float16>,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagInt8Nhwc,
+                     ResizeBilinearMagTest<DataType::QAsymmS8>,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc,
                      ResizeBilinearMagTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
@@ -864,6 +934,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint16Nhwc,
 ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor,
                      SimpleResizeNearestNeighborTest<DataType::Float32>,
                      DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborInt8,
+                     SimpleResizeNearestNeighborTest<DataType::QAsymmS8>,
+                     DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
                      SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
@@ -873,6 +946,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16,
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
                      ResizeNearestNeighborNopTest<DataType::Float32>,
                      DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopInt8,
+                     ResizeNearestNeighborNopTest<DataType::QAsymmS8>,
+                     DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
                      ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
@@ -882,6 +958,9 @@ ARMNN_AUTO_TEST_CASE(esizeNearestNeighborNopUint16,
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
                      ResizeNearestNeighborSqMinTest<DataType::Float32>,
                      DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinInt8,
+                     ResizeNearestNeighborSqMinTest<DataType::QAsymmS8>,
+                     DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
                      ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
@@ -891,6 +970,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16,
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
                      ResizeNearestNeighborMinTest<DataType::Float32>,
                      DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinInt8,
+                     ResizeNearestNeighborMinTest<DataType::QAsymmS8>,
+                     DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
                      ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
@@ -900,6 +982,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16,
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
                      ResizeNearestNeighborMagTest<DataType::Float32>,
                      DataLayout::NCHW, 0.10f, 50, 0.11f, 20)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagInt8,
+                     ResizeNearestNeighborMagTest<DataType::QAsymmS8>,
+                     DataLayout::NCHW, 0.10f, 50, 0.11f, 20)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
                      ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
                      DataLayout::NCHW, 0.10f, 50, 0.11f, 20)
@@ -911,6 +996,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16,
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc,
                      ResizeNearestNeighborNopTest<DataType::Float32>,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopInt8Nhwc,
+                     ResizeNearestNeighborNopTest<DataType::QAsymmS8>,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
                      ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
@@ -920,6 +1008,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint16Nhwc,
 ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
                      SimpleResizeNearestNeighborTest<DataType::Float32>,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborInt8Nhwc,
+                     SimpleResizeNearestNeighborTest<DataType::QAsymmS8>,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
                      SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
@@ -929,6 +1020,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16Nhwc,
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
                      ResizeNearestNeighborSqMinTest<DataType::Float32>,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinInt8Nhwc,
+                     ResizeNearestNeighborSqMinTest<DataType::QAsymmS8>,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
                      ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
@@ -938,6 +1032,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16Nhwc,
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
                      ResizeNearestNeighborMinTest<DataType::Float32>,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinInt8Nhwc,
+                     ResizeNearestNeighborMinTest<DataType::QAsymmS8>,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
                      ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
@@ -947,6 +1044,9 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16Nhwc,
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
                      ResizeNearestNeighborMagTest<DataType::Float32>,
                      DataLayout::NHWC, 0.10f, 50, 0.11f, 20)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagInt8Nhwc,
+                     ResizeNearestNeighborMagTest<DataType::QAsymmS8>,
+                     DataLayout::NHWC, 0.10f, 50, 0.11f, 20)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
                      ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
                      DataLayout::NHWC, 0.10f, 50, 0.11f, 20)
@@ -1083,6 +1183,7 @@ ARMNN_AUTO_TEST_CASE(SimpleFloorQuantisedSymm16, SimpleFloorTest<DataType::QSymmS16>)
 
+ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymmS8, SimpleReshapeTest<DataType::QAsymmS8>)
 ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymm8, SimpleReshapeTest<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedSymm16, SimpleReshapeTest<DataType::QSymmS16>)
 ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest<DataType::Float32>)
@@ -1094,6 +1195,8 @@ ARMNN_AUTO_TEST_CASE(RsqrtZero, RsqrtZeroTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(RsqrtNegative, RsqrtNegativeTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(Rsqrt2dFloat16, Rsqrt2dTest<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE(Rsqrt3dFloat16, Rsqrt3dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedAsymmS8, Rsqrt2dTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedAsymmS8, Rsqrt3dTest<DataType::QAsymmS8>)
 ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedAsymm8, Rsqrt2dTest<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedAsymm8, Rsqrt3dTest<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedSymm16, Rsqrt2dTest<DataType::QSymmS16>)
@@ -1108,6 +1211,10 @@ ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimplePermuteQASymmS8, SimplePermuteTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymmS8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmS8>)
 ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmU8>)
@@ -1168,6 +1275,14 @@ ARMNN_AUTO_TEST_CASE(MeanVts1Float32, MeanVts1Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test<DataType::Float32>)
 
+ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymmS8, MeanSimpleTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymmS8, MeanSimpleAxisTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymmS8, MeanKeepDimsTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymmS8, MeanMultipleDimsTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymmS8, MeanVts1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymmS8, MeanVts2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymmS8, MeanVts3Test<DataType::QAsymmS8>)
+
 ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QAsymmU8>)
@@ -1201,6 +1316,11 @@ ARMNN_AUTO_TEST_CASE(ArgMaxChannelSigned32, ArgMaxChannelTest<DataType::Signed32>)
 ARMNN_AUTO_TEST_CASE(ArgMinWidthSigned32, ArgMinWidthTest<DataType::Signed32>)
 
+ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedAsymmS8, ArgMaxSimpleTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedAsymmS8, ArgMinSimpleTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedAsymmS8, ArgMinChannelTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannelQuantisedAsymmS8, ArgMaxChannelTest<DataType::QAsymmS8>)
+
 ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedAsymm8, ArgMaxSimpleTest<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedAsymm8, ArgMinSimpleTest<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedAsymm8, ArgMinChannelTest<DataType::QAsymmU8>)
@@ -1269,6 +1389,14 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_5, BatchToSpaceNdNhwcTest5<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_7, BatchToSpaceNdNhwcTest7<DataType::Float16>)
 
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt1, BatchToSpaceNdNhwcTest1<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt2, BatchToSpaceNdNhwcTest2<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt3, BatchToSpaceNdNhwcTest3<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt4, BatchToSpaceNdNhwcTest4<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt5, BatchToSpaceNdNhwcTest5<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt6, BatchToSpaceNdNhwcTest6<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcInt7, BatchToSpaceNdNhwcTest7<DataType::QAsymmS8>)
+
 ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QAsymmU8>)
@@ -1293,6 +1421,14 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_5, BatchToSpaceNdNchwTest5<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_7, BatchToSpaceNdNchwTest7<DataType::Float16>)
 
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt1, BatchToSpaceNdNchwTest1<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt2, BatchToSpaceNdNchwTest2<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt3, BatchToSpaceNdNchwTest3<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt4, BatchToSpaceNdNchwTest4<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt5, BatchToSpaceNdNchwTest5<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt6, BatchToSpaceNdNchwTest6<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwInt7, BatchToSpaceNdNchwTest7<DataType::QAsymmS8>)
+
 ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3<DataType::QAsymmU8>)
@@ -1320,6 +1456,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_2, DepthToSpaceTest2<DataType::Float16>, DataLayout::NCHW);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
 
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_1, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_2, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_3, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_4, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NCHW);
+
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
@@ -1340,6 +1481,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_2, DepthToSpaceTest2<DataType::Float16>, DataLayout::NHWC);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
 
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_1, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_2, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_3, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_4, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NHWC);
+
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
@@ -1468,6 +1614,8 @@ ARMNN_AUTO_TEST_CASE(Abs2dSigned32, Abs2dTest<DataType::Signed32>)
 ARMNN_AUTO_TEST_CASE(Abs3dSigned32, Abs3dTest<DataType::Signed32>)
 ARMNN_AUTO_TEST_CASE(AbsZeroSigned32, AbsZeroTest<DataType::Signed32>)
 
+ARMNN_AUTO_TEST_CASE(Abs2dQuantisedAsymmS8, Abs2dTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(Abs3dQuantisedAsymmS8, Abs3dTest<DataType::QAsymmS8>)
 ARMNN_AUTO_TEST_CASE(Abs2dQuantisedAsymm8, Abs2dTest<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(Abs3dQuantisedAsymm8, Abs3dTest<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(Abs2dQuantisedSymm16, Abs2dTest<DataType::QSymmS16>)
@@ -1482,6 +1630,16 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsFloat)
 {
     DetectionPostProcessFastNmsFloatTest();
 }
+BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsInt8)
+{
+    DetectionPostProcessRegularNmsQuantizedTest<
+        RefWorkloadFactory, DataType::QAsymmS8>();
+}
+BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt8)
+{
+    DetectionPostProcessFastNmsQuantizedTest<
+        RefWorkloadFactory, DataType::QAsymmS8>();
+}
 BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsUint8)
 {
     DetectionPostProcessRegularNmsQuantizedTest<
@@ -1552,6 +1710,10 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeFloat32, SimpleTransposeTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet2Test, TransposeValueSet2Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet3Test, TransposeValueSet3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymmS8, SimpleTransposeTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet1Test, TransposeValueSet1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(TransposeQASymmS8ValueSet3Test, TransposeValueSet3Test<DataType::QAsymmS8>)
 ARMNN_AUTO_TEST_CASE(SimpleTransposeQASymm8, SimpleTransposeTest<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet1Test, TransposeValueSet1Test<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(TransposeQASymm8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmU8>)
@@ -1570,6 +1732,14 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNhwc,
                      SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
                      true,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dInt8Nchw,
+                     SimpleTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+                     true,
+                     DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dInt8Nhwc,
+                     SimpleTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+                     true,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nchw,
                      SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
@@ -1595,6 +1765,14 @@ ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dFloatNhwc,
                      SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
                      true,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dInt8Nchw,
+                     SimpleTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+                     true,
+                     DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dInt8Nhwc,
+                     SimpleTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+                     true,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nchw,
                      SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
@@ -1620,6 +1798,14 @@ ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dFloatNhwc,
                      PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
                      true,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dInt8Nchw,
+                     PaddedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+                     true,
+                     DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dInt8Nhwc,
+                     PaddedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+                     true,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nchw,
                      PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
@@ -1645,6 +1831,14 @@ ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dFloatNhwc,
                      PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
                      true,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt8Nchw,
+                     PaddedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+                     true,
+                     DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt8Nhwc,
+                     PaddedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+                     true,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nchw,
                      PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
@@ -1670,6 +1864,14 @@ ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dFloatNhwc,
                      StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
                      true,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dInt8Nchw,
+                     StridedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+                     true,
+                     DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dInt8Nhwc,
+                     StridedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+                     true,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nchw,
                      StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
@@ -1695,6 +1897,14 @@ ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dFloatNhwc,
                      StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
                      true,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dInt8Nchw,
+                     StridedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+                     true,
+                     DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dInt8Nhwc,
+                     StridedTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+                     true,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nchw,
                      StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
@@ -1718,6 +1928,12 @@ ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dFloatNchw,
 ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dFloatNhwc,
                      MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
                      DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dInt8Nchw,
+                     MultiChannelTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+                     DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dInt8Nhwc,
+                     MultiChannelTransposeConvolution2dTest<DataType::QAsymmS8, DataType::Signed32>,
+                     DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nchw,
                      MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      DataLayout::NCHW)
@@ -1754,6 +1970,8 @@ ARMNN_AUTO_TEST_CASE(NegZero, NegZeroTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(NegNegative, NegNegativeTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(Neg2dFloat16, Neg2dTest<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE(Neg3dFloat16, Neg3dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(Neg2dQuantisedAsymmS8, Neg2dTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(Neg3dQuantisedAsymmS8, Neg3dTest<DataType::QAsymmS8>)
 ARMNN_AUTO_TEST_CASE(Neg2dQuantisedAsymm8, Neg2dTest<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(Neg3dQuantisedAsymm8, Neg3dTest<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(Neg2dQuantisedSymm16, Neg2dTest<DataType::QSymmS16>)
diff --git a/src/backends/reference/workloads/Pad.cpp b/src/backends/reference/workloads/Pad.cpp
index ffdd469609..1b634145fc 100644
--- a/src/backends/reference/workloads/Pad.cpp
+++ b/src/backends/reference/workloads/Pad.cpp
@@ -177,6 +177,12 @@ template void Pad<uint8_t>(const TensorInfo& inputInfo,
                            const uint8_t* inputData,
                            uint8_t* outData,
                            const float padValue);
+template void Pad<int8_t>(const TensorInfo& inputInfo,
+                          const TensorInfo& outputInfo,
+                          std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
+                          const int8_t* inputData,
+                          int8_t* outData,
+                          const float padValue);
 template void Pad<int16_t>(const TensorInfo& inputInfo,
                            const TensorInfo& outputInfo,
                            std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp
index 777682d70c..6f82d5ffdb 100644
--- a/src/backends/reference/workloads/RefPadWorkload.cpp
+++ b/src/backends/reference/workloads/RefPadWorkload.cpp
@@ -36,6 +36,7 @@ void RefPadWorkload<DataType>::Execute() const
 template class RefPadWorkload<DataType::BFloat16>;
 template class RefPadWorkload<DataType::Float32>;
 template class RefPadWorkload<DataType::Float16>;
+template class RefPadWorkload<DataType::QAsymmS8>;
 template class RefPadWorkload<DataType::QAsymmU8>;
 template class RefPadWorkload<DataType::QSymmS16>;
 
diff --git a/src/backends/reference/workloads/RefPadWorkload.hpp b/src/backends/reference/workloads/RefPadWorkload.hpp
index 5134ac8bff..74dcab1967 100644
--- a/src/backends/reference/workloads/RefPadWorkload.hpp
+++ b/src/backends/reference/workloads/RefPadWorkload.hpp
@@ -31,9 +31,10 @@ public:
 };
 
 using RefPadBFloat16Workload = RefPadWorkload<DataType::BFloat16>;
-using RefPadFloat32Workload = RefPadWorkload<DataType::Float32>;
-using RefPadFloat16Workload = RefPadWorkload<DataType::Float16>;
-using RefPadQAsymm8Workload = RefPadWorkload<DataType::QAsymmU8>;
-using RefPadQSymm16Workload = RefPadWorkload<DataType::QSymmS16>;
+using RefPadFloat32Workload  = RefPadWorkload<DataType::Float32>;
+using RefPadFloat16Workload  = RefPadWorkload<DataType::Float16>;
+using RefPadQAsymmS8Workload = RefPadWorkload<DataType::QAsymmS8>;
+using RefPadQAsymm8Workload  = RefPadWorkload<DataType::QAsymmU8>;
+using RefPadQSymm16Workload  = RefPadWorkload<DataType::QSymmS16>;
 
 } //namespace armnn
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.cpp b/src/backends/reference/workloads/RefPermuteWorkload.cpp
index 5751ed80a3..75e9d0acf0 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.cpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.cpp
@@ -31,6 +31,7 @@ void RefPermuteWorkload<DataType>::Execute() const
 template class RefPermuteWorkload<DataType::BFloat16>;
 template class RefPermuteWorkload<DataType::Float16>;
 template class RefPermuteWorkload<DataType::Float32>;
+template class RefPermuteWorkload<DataType::QAsymmS8>;
 template class RefPermuteWorkload<DataType::QAsymmU8>;
 template class RefPermuteWorkload<DataType::QSymmS16>;
 
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.hpp b/src/backends/reference/workloads/RefPermuteWorkload.hpp
index a8d308e47c..b9f259a8f8 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.hpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.hpp
@@ -28,9 +28,10 @@ public:
 };
 
 using RefPermuteBFloat16Workload = RefPermuteWorkload<DataType::BFloat16>;
-using RefPermuteFloat16Workload = RefPermuteWorkload<DataType::Float16>;
-using RefPermuteFloat32Workload = RefPermuteWorkload<DataType::Float32>;
-using RefPermuteQAsymm8Workload = RefPermuteWorkload<DataType::QAsymmU8>;
-using RefPermuteQSymm16Workload = RefPermuteWorkload<DataType::QSymmS16>;
+using RefPermuteFloat16Workload  = RefPermuteWorkload<DataType::Float16>;
+using RefPermuteFloat32Workload  = RefPermuteWorkload<DataType::Float32>;
+using RefPermuteQAsymmS8Workload = RefPermuteWorkload<DataType::QAsymmS8>;
+using RefPermuteQAsymm8Workload  = RefPermuteWorkload<DataType::QAsymmU8>;
+using RefPermuteQSymm16Workload  = RefPermuteWorkload<DataType::QSymmS16>;
 
 } //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefTransposeWorkload.cpp b/src/backends/reference/workloads/RefTransposeWorkload.cpp
index 242668b6b1..4e027bee2e 100644
--- a/src/backends/reference/workloads/RefTransposeWorkload.cpp
+++ b/src/backends/reference/workloads/RefTransposeWorkload.cpp
@@ -30,6 +30,7 @@ void RefTransposeWorkload<DataType>::Execute() const
 template class RefTransposeWorkload<DataType::BFloat16>;
 template class RefTransposeWorkload<DataType::Float16>;
 template class RefTransposeWorkload<DataType::Float32>;
+template class RefTransposeWorkload<DataType::QAsymmS8>;
 template class RefTransposeWorkload<DataType::QAsymmU8>;
 template class RefTransposeWorkload<DataType::QSymmS16>;
 
diff --git a/src/backends/reference/workloads/RefTransposeWorkload.hpp b/src/backends/reference/workloads/RefTransposeWorkload.hpp
index dcfe618b75..387572aab9 100644
--- a/src/backends/reference/workloads/RefTransposeWorkload.hpp
+++ b/src/backends/reference/workloads/RefTransposeWorkload.hpp
@@ -30,6 +30,7 @@ public:
 using RefTransposeBFloat16Workload = RefTransposeWorkload<DataType::BFloat16>;
 using RefTransposeFloat16Workload = RefTransposeWorkload<DataType::Float16>;
 using RefTransposeFloat32Workload = RefTransposeWorkload<DataType::Float32>;
+using RefTransposeQAsymmS8Workload = RefTransposeWorkload<DataType::QAsymmS8>;
 using RefTransposeQAsymm8Workload = RefTransposeWorkload<DataType::QAsymmU8>;
 using RefTransposeQSymm16Workload = RefTransposeWorkload<DataType::QSymmS16>;
-- 
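Note, not part of the patch: every hunk above follows the same two conventions — the layer-support functions enumerate DataType::QAsymmS8 in their supportedTypes arrays, and the templated reference workloads gain an explicit QAsymmS8 instantiation plus a factory dispatch branch. For readers unfamiliar with the data type itself, TENSOR_QUANT8_ASYMM_SIGNED (QAsymmS8 in Arm NN) encodes real values as signed 8-bit integers with a per-tensor scale and zero point, so that real = scale * (quantized - zeroPoint). The sketch below illustrates only that numeric mapping; the helper functions are hypothetical stand-ins written for this note, not Arm NN APIs.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>

    // Quantize a real value to QAsymmS8: q = round(value / scale) + zeroPoint,
    // clamped to the signed 8-bit range [-128, 127].
    int8_t QuantizeS8(float value, float scale, int32_t zeroPoint)
    {
        int32_t q = static_cast<int32_t>(std::round(value / scale)) + zeroPoint;
        return static_cast<int8_t>(std::min(127, std::max(-128, q)));
    }

    // Recover the real value: real = scale * (q - zeroPoint).
    float DequantizeS8(int8_t value, float scale, int32_t zeroPoint)
    {
        return scale * (static_cast<int32_t>(value) - zeroPoint);
    }

    int main()
    {
        const float scale = 0.1f;
        const int32_t zeroPoint = -10; // asymmetric: the zero point need not be 0

        const int8_t q = QuantizeS8(2.5f, scale, zeroPoint);    // 25 + (-10) = 15
        std::cout << static_cast<int>(q) << " -> "
                  << DequantizeS8(q, scale, zeroPoint) << '\n'; // prints "15 -> 2.5"
    }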