From 303980c502c721f13d65e7087be6c0758df65044 Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Fri, 17 Apr 2020 12:45:14 +0100
Subject: IVGCVSW-4668 Add TENSOR_QUANT8_ASYMM_SIGNED data type support to
 CpuRef operators

Signed-off-by: Teresa Charlin
Signed-off-by: Sadik Armagan
Change-Id: I094125ba80699cc3cf5226bda6662a54e6caa988
---
 src/backends/reference/RefLayerSupport.cpp | 140 ++++++++++++++++++-----------
 1 file changed, 86 insertions(+), 54 deletions(-)

diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 25d639a38a..65ae14ff40 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -181,10 +181,11 @@ bool RefLayerSupport::IsArgMinMaxSupported(const armnn::TensorInfo &input, const armnn::TensorInfo &output,
 {
     IgnoreUnused(descriptor);
 
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16,
         DataType::Signed32
@@ -211,11 +212,12 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
 {
     IgnoreUnused(descriptor);
 
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -260,11 +262,12 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
     std::string outputTensorStr = "output";
 
     // Define supported types.
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -302,12 +305,13 @@ bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0,
                                             Optional<std::string&> reasonIfUnsupported) const
 {
     IgnoreUnused(descriptor);
-    std::array<DataType,7> supportedInputTypes =
+    std::array<DataType,8> supportedInputTypes =
     {
         DataType::Boolean,
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16,
         DataType::Signed32
@@ -339,8 +343,8 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS16
     };
 
@@ -366,11 +370,11 @@ bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
     {
         DataType::BFloat16,
         DataType::Float32,
-        DataType::Signed32,
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS8,
-        DataType::QSymmS16
+        DataType::QSymmS16,
+        DataType::Signed32
     };
 
     return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
@@ -462,8 +466,8 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS8,
         DataType::QSymmS16
     };
@@ -495,9 +499,9 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
         ARMNN_NO_DEPRECATE_WARN_BEGIN
         std::array<DataType,4> supportedWeightTypes =
         {
+            DataType::QAsymmS8,
             DataType::QAsymmU8,
             DataType::QSymmS8,
-            DataType::QAsymmS8,
             DataType::QuantizedSymm8PerAxis // deprecated
         };
         ARMNN_NO_DEPRECATE_WARN_END
@@ -543,8 +547,8 @@ bool RefLayerSupport::IsDebugSupported(const TensorInfo& input,
         DataType::BFloat16,
         DataType::Float16,
         DataType::Float32,
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS8,
         DataType::QSymmS16,
         DataType::Signed32
@@ -570,11 +574,12 @@ bool RefLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
     IgnoreUnused(descriptor);
     bool supported = true;
 
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -598,6 +603,7 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
 {
+    IgnoreUnused(descriptor);
     bool supported = true;
 
     // Define supported types.
@@ -606,9 +612,9 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
-        DataType::QSymmS8,
         DataType::QAsymmS8,
         DataType::QAsymmU8,
+        DataType::QSymmS8,
         DataType::QSymmS16
     };
 
@@ -621,21 +627,22 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
     supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
                                   "Reference DepthwiseConvolution2d: input and output types mismatched.");
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::array<DataType,3> supportedWeightTypes =
-    {
-        DataType::QAsymmU8,
-        DataType::QSymmS8,
-        DataType::QuantizedSymm8PerAxis // deprecated
-    };
-    ARMNN_NO_DEPRECATE_WARN_END
-
     const DataType inputType = input.GetDataType();
 
     if (IsQuantized8BitType(inputType))
     {
+        ARMNN_NO_DEPRECATE_WARN_BEGIN
+        std::array<DataType,4> supportedWeightTypes =
+        {
+            DataType::QAsymmS8,
+            DataType::QAsymmU8,
+            DataType::QSymmS8,
+            DataType::QuantizedSymm8PerAxis // deprecated
+        };
+        ARMNN_NO_DEPRECATE_WARN_END
         supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
-                                      "Reference convolution2d: weights type not supported for quantized input.");
+                                      "Reference DepthwiseConvolution2d: weights type not supported for "
+                                      "quantized input.");
     }
     else
     {
@@ -658,7 +665,6 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
         supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
                                       "Reference DepthwiseConvolution2d: biases is not a supported type.");
     }
-    IgnoreUnused(descriptor);
 
     return supported;
 }
@@ -716,10 +722,11 @@ bool RefLayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
 
     bool supported = true;
 
-    std::array<DataType,4> supportedInputTypes =
+    std::array<DataType,5> supportedInputTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -750,10 +757,11 @@ bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
 {
     bool supported = true;
 
-    std::array<DataType,5> supportedTypes = {
+    std::array<DataType,6> supportedTypes = {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -786,11 +794,12 @@ bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
 {
     IgnoreUnused(descriptor);
 
-    std::array<DataType,6> supportedTypes =
+    std::array<DataType,7> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16,
         DataType::Signed32
@@ -883,8 +892,8 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS16
     };
 
@@ -913,8 +922,9 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
     }
 
     ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::array<DataType,3> supportedWeightTypes =
+    std::array<DataType,4> supportedWeightTypes =
     {
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS8,
         DataType::QuantizedSymm8PerAxis // deprecated
@@ -969,11 +979,12 @@ bool RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0,
                                         armnn::Optional<std::string&> reasonIfUnsupported) const
 {
     bool supported = true;
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1050,11 +1061,12 @@ bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
 {
     IgnoreUnused(descriptor);
     // Define supported types
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
    {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1261,11 +1273,12 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
     std::string meanLayerStr = "Mean";
     std::string outputTensorStr = "output";
 
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1328,11 +1341,12 @@ bool RefLayerSupport::IsMemCopySupported(const TensorInfo &input,
 {
     bool supported = true;
 
-    std::array<DataType,6> supportedTypes =
+    std::array<DataType,7> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16,
         DataType::Boolean
@@ -1357,10 +1371,11 @@ bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0,
 {
     bool supported = true;
 
-    std::array<DataType,5> supportedTypes = {
+    std::array<DataType,6> supportedTypes = {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1397,8 +1412,8 @@ bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS16
     };
 
@@ -1431,11 +1446,12 @@ bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
     IgnoreUnused(descriptor);
 
     // Define supported types
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float16,
         DataType::Float32,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1470,11 +1486,12 @@ bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
     bool supported = true;
 
     // Define supported output and inputs types.
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1500,11 +1517,12 @@ bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
     bool supported = true;
 
     // Define supported output and inputs types.
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1574,8 +1592,8 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
 
     // Define supported output types.
     std::array<DataType,4> supportedOutputTypes = {
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS8,
         DataType::QSymmS16
     };
@@ -1616,11 +1634,12 @@ bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                                Optional<std::string&> reasonIfUnsupported) const
 {
     bool supported = true;
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1649,8 +1668,8 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
-        DataType::QAsymmU8,
         DataType::QAsymmS8,
+        DataType::QAsymmU8,
         DataType::QSymmS16
     };
 
@@ -1684,10 +1703,11 @@ bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
     IgnoreUnused(descriptor);
     bool supported = true;
 
-    std::array<DataType,4> supportedTypes =
+    std::array<DataType,5> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1741,11 +1761,12 @@ bool RefLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
 {
     IgnoreUnused(descriptor);
     bool supported = true;
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1771,11 +1792,12 @@ bool RefLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
     IgnoreUnused(descriptor);
     bool supported = true;
 
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1798,11 +1820,12 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
 {
     IgnoreUnused(descriptor);
     bool supported = true;
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
    {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1820,11 +1843,12 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
 {
     IgnoreUnused(descriptor);
     bool supported = true;
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1851,11 +1875,12 @@ bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
     IgnoreUnused(descriptor);
     bool supported = true;
 
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1883,10 +1908,11 @@ bool RefLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
     IgnoreUnused(descriptor);
     bool supported = true;
 
-    std::array<DataType,4> supportedTypes =
+    std::array<DataType,5> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1910,10 +1936,11 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
 {
     bool supported = true;
 
-    std::array<DataType,5> supportedTypes = {
+    std::array<DataType,6> supportedTypes = {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1946,11 +1973,12 @@ bool RefLayerSupport::IsPreluSupported(const TensorInfo& input,
 {
     bool supported = true;
 
-    std::array<DataType,5> supportedTypes
+    std::array<DataType,6> supportedTypes
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1983,12 +2011,14 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
     IgnoreUnused(descriptor);
     bool supported = true;
 
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,7> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
+        DataType::QSymmS8,
         DataType::QSymmS16
     };
 
@@ -2003,11 +2033,12 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
 
     const DataType inputType = input.GetDataType();
 
-    if (inputType == DataType::QAsymmU8)
+    if (IsQuantized8BitType(inputType))
     {
         ARMNN_NO_DEPRECATE_WARN_BEGIN
-        std::array<DataType,3> supportedWeightTypes =
+        std::array<DataType,4> supportedWeightTypes =
         {
+            DataType::QAsymmS8,
             DataType::QAsymmU8,
             DataType::QSymmS8,
             DataType::QuantizedSymm8PerAxis //Deprecated
@@ -2052,11 +2083,12 @@ bool RefLayerSupport::IsTransposeSupported(const TensorInfo& input,
     bool supported = true;
 
     // Define supported output and inputs types.
-    std::array<DataType,5> supportedTypes =
+    std::array<DataType,6> supportedTypes =
     {
         DataType::BFloat16,
         DataType::Float32,
         DataType::Float16,
+        DataType::QAsymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
    };
--
cgit v1.2.1
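
Editor's note: the change above is mechanical. Every IsXxxSupported() override in
RefLayerSupport.cpp keeps a fixed-size std::array of the DataType values the CpuRef
backend accepts, and this patch adds DataType::QAsymmS8 (the Android NN
TENSOR_QUANT8_ASYMM_SIGNED type) to each list, bumping the array's size parameter by
one. The sketch below is a minimal, self-contained illustration of that pattern, not
Arm NN's actual code: the simplified DataType enum, TypeAnyOf, and IsLayerSupported
here are hypothetical stand-ins for armnn's rule machinery (CheckSupportRule,
TypeAnyOf, and the per-layer overrides).

// Minimal sketch of the supported-type check pattern used throughout
// RefLayerSupport.cpp. All names below are illustrative, not Arm NN's API.
#include <array>
#include <iostream>
#include <string>

enum class DataType { BFloat16, Float32, Float16, QAsymmS8, QAsymmU8, QSymmS16 };

// Stand-in for armnn's TypeAnyOf rule: true if 'type' appears in 'supported'.
template <typename Container>
bool TypeAnyOf(DataType type, const Container& supported)
{
    for (DataType candidate : supported)
    {
        if (candidate == type)
        {
            return true;
        }
    }
    return false;
}

// Stand-in for a per-layer IsXxxSupported() override.
bool IsLayerSupported(DataType input, std::string* reasonIfUnsupported)
{
    // Before the patch such arrays held five types; adding QAsymmS8 makes six,
    // hence the <DataType,5> -> <DataType,6> bumps seen in every hunk above.
    std::array<DataType, 6> supportedTypes =
    {
        DataType::BFloat16,
        DataType::Float32,
        DataType::Float16,
        DataType::QAsymmS8, // the newly supported signed asymmetric 8-bit type
        DataType::QAsymmU8,
        DataType::QSymmS16
    };

    if (!TypeAnyOf(input, supportedTypes))
    {
        if (reasonIfUnsupported != nullptr)
        {
            *reasonIfUnsupported = "Reference layer: input type not supported.";
        }
        return false;
    }
    return true;
}

int main()
{
    std::string reason;
    // A QAsymmS8 input now passes the check; prints "true".
    std::cout << std::boolalpha << IsLayerSupported(DataType::QAsymmS8, &reason) << '\n';
}

Because each check is just a membership test over a small array, extending type
support is a one-line addition per operator plus the size bump in the template
argument, which is exactly what this patch repeats across the CpuRef layer checks.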